feat: update deployment configuration and encrypted env loader
- Update Ansible playbooks and roles for application deployment
- Add new Gitea/Traefik troubleshooting playbooks
- Update Docker Compose configurations (base, local, staging, production)
- Enhance EncryptedEnvLoader with improved error handling
- Add deployment scripts (autossh setup, migration, secret testing)
- Update CI/CD workflows and documentation
- Add Semaphore stack configuration
@@ -4,6 +4,33 @@
This deployment setup uses separate Docker Compose stacks for better maintainability and a clear separation of concerns.

### Docker Compose Structure

The project uses a **Base + Override Pattern** to prevent configuration drift between environments:

- **`docker-compose.base.yml`** - Shared base configuration (services, networks, volumes)
- **`docker-compose.local.yml`** - Local development overrides (ports, host mounts, debug flags)
- **`docker-compose.staging.yml`** - Staging environment overrides (Traefik labels, staging volumes)
- **`docker-compose.production.yml`** - Production environment overrides (security, logging, resources)

**Usage:**

```bash
# Local development
docker compose -f docker-compose.base.yml -f docker-compose.local.yml up

# Staging
docker compose -f docker-compose.base.yml -f docker-compose.staging.yml up

# Production
docker compose -f docker-compose.base.yml -f docker-compose.production.yml up
```

**Benefits:**

- ✅ Single source of truth for shared configuration
- ✅ Environment-specific differences clearly visible
- ✅ Reduced configuration drift between environments
- ✅ Easier maintenance and updates
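To see exactly what a given base + override combination resolves to before starting it, `docker compose config` renders the merged configuration; this is a quick drift check to confirm an override changes only what you expect:

```bash
# Render the merged production configuration without starting anything.
docker compose -f docker-compose.base.yml -f docker-compose.production.yml config
```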

### Infrastructure Components

```
@@ -62,11 +62,6 @@
     set_fact:
       server_vpn_ip: "{{ (wireguard_server_config_read.content | b64decode | regex_search('Address = ([0-9.]+)', '\\1')) | first | default('10.8.0.1') }}"

-  - name: Set default DNS servers if not provided
-    set_fact:
-      wireguard_dns_servers: "{{ [server_vpn_ip] }}"
-    when: wireguard_dns_servers | length == 0
-
   - name: Extract WireGuard server IP octets
     set_fact:
       wireguard_server_ip_octets: "{{ server_vpn_ip.split('.') }}"
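For reference, Ansible's `regex_search` with a capture-group argument (`'\1'`) returns a list of the captured groups, which is why the expression above pipes through `first`. The equivalent shell extraction, useful when debugging directly on the server (path assumed to be the default `wg0.conf`):

```bash
# Pull the VPN Address value out of the server config, mirroring what
# regex_search('Address = ([0-9.]+)', '\1') does in the set_fact above.
sed -n 's/^Address = \([0-9.]*\).*/\1/p' /etc/wireguard/wg0.conf
```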
192 deployment/ansible/playbooks/check-gitea-bad-gateway.yml Normal file
@@ -0,0 +1,192 @@
---
- name: Diagnose Gitea Bad Gateway Issue
  hosts: production
  gather_facts: yes
  become: no

  vars:
    gitea_stack_path: "{{ stacks_base_path }}/gitea"

  tasks:
    - name: Check if Gitea stack directory exists
      stat:
        path: "{{ gitea_stack_path }}"
      register: gitea_stack_dir

    - name: Display Gitea stack directory status
      debug:
        msg: "Gitea stack path: {{ gitea_stack_path }} - Exists: {{ gitea_stack_dir.stat.exists }}"

    - name: Check Gitea container status
      shell: |
        cd {{ gitea_stack_path }}
        echo "=== Gitea Container Status ==="
        docker compose ps 2>&1 || echo "Could not check container status"
      args:
        executable: /bin/bash
      register: gitea_status
      ignore_errors: yes
      failed_when: false
      when: gitea_stack_dir.stat.exists

    - name: Display Gitea container status
      debug:
        msg: "{{ gitea_status.stdout_lines }}"
      when: gitea_stack_dir.stat.exists

    - name: Check if Gitea container is running
      shell: |
        docker ps --filter name=gitea --format "{{ '{{' }}.Names{{ '}}' }}: {{ '{{' }}.Status{{ '}}' }}"
      register: gitea_running
      ignore_errors: yes
      failed_when: false

    - name: Display Gitea running status
      debug:
        msg: "{{ gitea_running.stdout_lines if gitea_running.stdout else 'Gitea container not found' }}"

    - name: Check Gitea logs (last 50 lines)
      shell: |
        cd {{ gitea_stack_path }}
        echo "=== Gitea Logs (Last 50 lines) ==="
        docker compose logs --tail=50 gitea 2>&1 || echo "Could not read Gitea logs"
      args:
        executable: /bin/bash
      register: gitea_logs
      ignore_errors: yes
      failed_when: false
      when: gitea_stack_dir.stat.exists

    - name: Display Gitea logs
      debug:
        msg: "{{ gitea_logs.stdout_lines }}"
      when: gitea_stack_dir.stat.exists

    - name: Check Gitea container health
      shell: |
        docker inspect gitea --format '{{ '{{' }}.State.Health.Status{{ '}}' }}' 2>&1 || echo "Could not check health"
      register: gitea_health
      ignore_errors: yes
      failed_when: false

    - name: Display Gitea health status
      debug:
        msg: "Gitea health: {{ gitea_health.stdout }}"

    - name: Test Gitea health endpoint from container
      shell: |
        docker exec gitea curl -f http://localhost:3000/api/healthz 2>&1 || echo "Health check failed"
      register: gitea_internal_health
      ignore_errors: yes
      failed_when: false

    - name: Display internal health check result
      debug:
        msg: "{{ gitea_internal_health.stdout_lines }}"

    - name: Check if Gitea is reachable from Traefik network
      shell: |
        docker exec traefik curl -f http://gitea:3000/api/healthz 2>&1 || echo "Could not reach Gitea from Traefik network"
      register: gitea_from_traefik
      ignore_errors: yes
      failed_when: false

    - name: Display Traefik to Gitea connectivity
      debug:
        msg: "{{ gitea_from_traefik.stdout_lines }}"

    - name: Check Traefik logs for Gitea errors
      shell: |
        cd {{ stacks_base_path }}/traefik
        echo "=== Traefik Logs - Gitea related (Last 30 lines) ==="
        docker compose logs --tail=100 traefik 2>&1 | grep -i "gitea" | tail -30 || echo "No Gitea-related logs found"
      args:
        executable: /bin/bash
      register: traefik_gitea_logs
      ignore_errors: yes
      failed_when: false

    - name: Display Traefik Gitea logs
      debug:
        msg: "{{ traefik_gitea_logs.stdout_lines }}"

    - name: Check Docker networks
      shell: |
        echo "=== Docker Networks ==="
        docker network ls
        echo ""
        echo "=== Traefik Network Details ==="
        docker network inspect traefik-public 2>&1 | grep -E "(Name|Subnet|Containers|gitea)" || echo "Could not inspect traefik-public network"
      args:
        executable: /bin/bash
      register: network_info
      ignore_errors: yes
      failed_when: false

    - name: Display network info
      debug:
        msg: "{{ network_info.stdout_lines }}"

    - name: Check if Gitea is in traefik-public network
      shell: |
        docker network inspect traefik-public 2>&1 | grep -i "gitea" || echo "Gitea not found in traefik-public network"
      register: gitea_in_network
      ignore_errors: yes
      failed_when: false

    - name: Display Gitea network membership
      debug:
        msg: "{{ gitea_in_network.stdout_lines }}"

    - name: Check Gitea container configuration
      shell: |
        echo "=== Gitea Container Labels ==="
        docker inspect gitea --format '{{ '{{' }}range $k, $v := .Config.Labels{{ '}}' }}{{ '{{' }}$k{{ '}}' }}={{ '{{' }}$v{{ '}}' }}{{ '{{' }}"\n"{{ '}}' }}{{ '{{' }}end{{ '}}' }}' 2>&1 | grep -i traefik || echo "No Traefik labels found"
      register: gitea_labels
      ignore_errors: yes
      failed_when: false

    - name: Display Gitea labels
      debug:
        msg: "{{ gitea_labels.stdout_lines }}"

    - name: Check Traefik service registration
      shell: |
        docker exec traefik wget -qO- http://localhost:8080/api/http/services 2>&1 | grep -i gitea || echo "Gitea service not found in Traefik API"
      register: traefik_service
      ignore_errors: yes
      failed_when: false

    - name: Display Traefik service registration
      debug:
        msg: "{{ traefik_service.stdout_lines }}"

    - name: Test external Gitea access
      shell: |
        echo "=== Testing External Gitea Access ==="
        curl -k -H "User-Agent: Mozilla/5.0" -s -o /dev/null -w "HTTP Status: %{http_code}\n" https://git.michaelschiemer.de/ 2>&1 || echo "Connection failed"
      args:
        executable: /bin/bash
      register: external_test
      ignore_errors: yes
      failed_when: false

    - name: Display external test result
      debug:
        msg: "{{ external_test.stdout_lines }}"

    - name: Summary
      debug:
        msg:
          - "=== DIAGNOSIS SUMMARY ==="
          - "1. Check if Gitea container is running"
          - "2. Check if Gitea is in traefik-public network"
          - "3. Check Gitea health endpoint (port 3000)"
          - "4. Check Traefik can reach Gitea"
          - "5. Check Traefik logs for errors"
          - ""
          - "Common issues:"
          - "- Container not running: Restart with 'docker compose up -d' in {{ gitea_stack_path }}"
          - "- Not in network: Recreate container or add to network"
          - "- Health check failing: Check Gitea logs for errors"
          - "- Traefik can't reach: Check network configuration"
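The diagnostic playbooks are read-only: every shell check sets `failed_when: false`, so a run never mutates the stack. Invocation follows the inventory pattern used elsewhere in this repository:

```bash
# Run the full Bad Gateway diagnosis against the production inventory.
ansible-playbook -i inventory/production.yml playbooks/check-gitea-bad-gateway.yml
```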
70 deployment/ansible/playbooks/check-traefik-gitea-config.yml Normal file
@@ -0,0 +1,70 @@
---
- name: Check Traefik Gitea Configuration
  hosts: production
  gather_facts: yes
  become: no

  vars:
    traefik_stack_path: "{{ stacks_base_path }}/traefik"

  tasks:
    - name: Check Traefik logs for Gitea errors
      shell: |
        cd {{ traefik_stack_path }}
        echo "=== Traefik Logs - Gitea errors (Last 50 lines) ==="
        docker compose logs --tail=100 traefik 2>&1 | grep -i "gitea\|502\|bad gateway" | tail -50 || echo "No Gitea-related errors found"
      args:
        executable: /bin/bash
      register: traefik_errors
      ignore_errors: yes
      failed_when: false

    - name: Display Traefik errors
      debug:
        msg: "{{ traefik_errors.stdout_lines }}"

    - name: Check dynamic Gitea configuration on server
      shell: |
        cat {{ traefik_stack_path }}/dynamic/gitea.yml 2>&1 || echo "File not found"
      register: gitea_dynamic_config
      ignore_errors: yes
      failed_when: false

    - name: Display dynamic Gitea config
      debug:
        msg: "{{ gitea_dynamic_config.stdout_lines }}"

    - name: Test if Traefik can resolve gitea hostname
      shell: |
        docker exec traefik getent hosts gitea 2>&1 || echo "Cannot resolve gitea hostname"
      register: traefik_resolve
      ignore_errors: yes
      failed_when: false

    - name: Display Traefik resolve result
      debug:
        msg: "{{ traefik_resolve.stdout_lines }}"

    - name: Get Gitea container IP
      shell: |
        docker inspect gitea --format '{{ '{{' }}with index .NetworkSettings.Networks "traefik-public"{{ '}}' }}{{ '{{' }}.IPAddress{{ '}}' }}{{ '{{' }}end{{ '}}' }}' 2>&1 || echo "Could not get IP"
      register: gitea_ip
      ignore_errors: yes
      failed_when: false

    - name: Display Gitea IP
      debug:
        msg: "Gitea IP in traefik-public network: {{ gitea_ip.stdout }}"

    - name: Test connectivity from Traefik to Gitea IP
      shell: |
        GITEA_IP="{{ gitea_ip.stdout | default('172.21.0.3') }}"
        docker exec traefik wget -qO- --timeout=5 "http://$GITEA_IP:3000/api/healthz" 2>&1 || echo "Cannot connect to Gitea at $GITEA_IP:3000"
      register: traefik_connect
      ignore_errors: yes
      failed_when: false
      when: gitea_ip.stdout is defined and gitea_ip.stdout != ""

    - name: Display connectivity result
      debug:
        msg: "{{ traefik_connect.stdout_lines }}"
@@ -50,21 +50,34 @@
       group: "{{ ansible_user }}"
       mode: '0755'

-    - name: Check if docker-compose.yml exists in application stack
+    - name: Check if docker-compose.base.yml exists in application stack
       stat:
-        path: "{{ app_stack_path }}/docker-compose.yml"
-      register: compose_file_exists
+        path: "{{ app_stack_path }}/docker-compose.base.yml"
+      register: compose_base_exists
+      when: not (application_sync_files | default(false) | bool)

-    - name: Fail if docker-compose.yml doesn't exist
+    - name: Check if docker-compose.production.yml exists in application stack
+      stat:
+        path: "{{ app_stack_path }}/docker-compose.production.yml"
+      register: compose_prod_exists
+      when: not (application_sync_files | default(false) | bool)
+
+    - name: Fail if docker-compose files don't exist
       fail:
         msg: |
-          Application Stack docker-compose.yml not found at {{ app_stack_path }}/docker-compose.yml
+          Application Stack docker-compose files not found at {{ app_stack_path }}
+
+          Required files:
+          - docker-compose.base.yml
+          - docker-compose.production.yml

           The Application Stack must be deployed first via:
           ansible-playbook -i inventory/production.yml playbooks/setup-infrastructure.yml

-          This will create the application stack with docker-compose.yml and .env file.
-      when: not compose_file_exists.stat.exists
+          This will create the application stack with docker-compose files and .env file.
+      when:
+        - not (application_sync_files | default(false) | bool)
+        - (not compose_base_exists.stat.exists or not compose_prod_exists.stat.exists)

     - name: Create backup directory
       file:
@@ -75,31 +88,47 @@
       mode: '0755'

   tasks:
-    - name: Verify docker-compose.yml exists
+    - name: Verify docker-compose files exist
       stat:
-        path: "{{ app_stack_path }}/docker-compose.yml"
-      register: compose_file_check
+        path: "{{ app_stack_path }}/docker-compose.base.yml"
+      register: compose_base_check
+      when: not (application_sync_files | default(false) | bool)

+    - name: Verify docker-compose.production.yml exists
+      stat:
+        path: "{{ app_stack_path }}/docker-compose.production.yml"
+      register: compose_prod_check
+      when: not (application_sync_files | default(false) | bool)
+
-    - name: Fail if docker-compose.yml doesn't exist
+    - name: Fail if docker-compose files don't exist
       fail:
         msg: |
-          Application Stack docker-compose.yml not found at {{ app_stack_path }}/docker-compose.yml
+          Application Stack docker-compose files not found at {{ app_stack_path }}
+
+          Required files:
+          - docker-compose.base.yml
+          - docker-compose.production.yml

           The Application Stack must be deployed first via:
           ansible-playbook -i inventory/production.yml playbooks/setup-infrastructure.yml

-          This will create the application stack with docker-compose.yml and .env file.
-      when: not compose_file_check.stat.exists
+          This will create the application stack with docker-compose files and .env file.
+      when:
+        - not (application_sync_files | default(false) | bool)
+        - (not compose_base_check.stat.exists or not compose_prod_check.stat.exists)

     - name: Backup current deployment metadata
       shell: |
-        docker compose -f {{ app_stack_path }}/docker-compose.yml ps --format json 2>/dev/null > {{ backups_path }}/{{ deployment_timestamp | regex_replace(':', '-') }}/current_containers.json || true
-        docker compose -f {{ app_stack_path }}/docker-compose.yml config 2>/dev/null > {{ backups_path }}/{{ deployment_timestamp | regex_replace(':', '-') }}/docker-compose-config.yml || true
+        docker compose -f {{ app_stack_path }}/docker-compose.base.yml -f {{ app_stack_path }}/docker-compose.production.yml ps --format json 2>/dev/null > {{ backups_path }}/{{ deployment_timestamp | regex_replace(':', '-') }}/current_containers.json || true
+        docker compose -f {{ app_stack_path }}/docker-compose.base.yml -f {{ app_stack_path }}/docker-compose.production.yml config 2>/dev/null > {{ backups_path }}/{{ deployment_timestamp | regex_replace(':', '-') }}/docker-compose-config.yml || true
       args:
         executable: /bin/bash
       changed_when: false
       ignore_errors: yes
-      when: compose_file_check.stat.exists
+      when:
+        - not (application_sync_files | default(false) | bool)
+        - compose_base_check.stat.exists | default(false)
+        - compose_prod_check.stat.exists | default(false)

     - name: Login to Docker registry (if credentials provided)
       community.docker.docker_login:
@@ -128,9 +157,19 @@
       msg: "Failed to pull image {{ app_image }}:{{ image_tag }}"
     when: image_pull.failed

-  - name: Update docker-compose.yml with new image tag (all services)
+  # Sync files first if application_sync_files=true (before updating docker-compose.production.yml)
+  - name: Sync application stack files
+    import_role:
+      name: application
+    vars:
+      application_sync_files: "{{ application_sync_files | default(false) }}"
+      application_compose_recreate: "never"  # Don't recreate yet, just sync files
+      application_remove_orphans: false
+    when: application_sync_files | default(false) | bool
+
+  - name: Update docker-compose.production.yml with new image tag (all services)
     replace:
-      path: "{{ app_stack_path }}/docker-compose.yml"
+      path: "{{ app_stack_path }}/docker-compose.production.yml"
+      # Match both localhost:5000 and registry.michaelschiemer.de (or any registry URL)
       regexp: '^(\s+image:\s+)(localhost:5000|registry\.michaelschiemer\.de|{{ docker_registry }})/{{ app_name }}:.*$'
       replace: '\1{{ app_image }}:{{ image_tag }}'
@@ -142,13 +181,13 @@
     import_role:
       name: application
     vars:
-      application_sync_files: false
+      application_sync_files: false  # Already synced above, don't sync again
       application_compose_recreate: "always"
       application_remove_orphans: true

   - name: Get deployed image information
     shell: |
-      docker compose -f {{ app_stack_path }}/docker-compose.yml config | grep -E "^\s+image:" | head -1 | awk '{print $2}' || echo "unknown"
+      docker compose -f {{ app_stack_path }}/docker-compose.base.yml -f {{ app_stack_path }}/docker-compose.production.yml config | grep -E "^\s+image:" | head -1 | awk '{print $2}' || echo "unknown"
     args:
       executable: /bin/bash
     register: deployed_image
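A quick way to sanity-check the image regexp before a deployment is to run the equivalent grep against the file on the host (registry names taken from the comment above; the `{{ docker_registry }}` alternative is omitted here because its value is environment-specific):

```bash
# Show the image: lines the replace task would rewrite.
grep -E '^\s+image:\s+(localhost:5000|registry\.michaelschiemer\.de)/' \
  docker-compose.production.yml
```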
@@ -0,0 +1,143 @@
---
- name: Fix Gitea Traefik Configuration - Remove Dynamic Config and Use Labels
  hosts: production
  gather_facts: yes
  become: no

  vars:
    traefik_stack_path: "{{ stacks_base_path }}/traefik"
    gitea_stack_path: "{{ stacks_base_path }}/gitea"

  tasks:
    - name: Backup dynamic Gitea configuration
      shell: |
        cd {{ traefik_stack_path }}/dynamic
        if [ -f gitea.yml ]; then
          ts=$(date +%Y%m%d-%H%M%S)
          cp gitea.yml "gitea.yml.backup-$ts"
          echo "Backed up to gitea.yml.backup-$ts"
        else
          echo "File not found, nothing to backup"
        fi
      args:
        executable: /bin/bash
      register: backup_result
      ignore_errors: yes
      failed_when: false

    - name: Display backup result
      debug:
        msg: "{{ backup_result.stdout_lines }}"

    - name: Remove dynamic Gitea configuration
      file:
        path: "{{ traefik_stack_path }}/dynamic/gitea.yml"
        state: absent
      register: remove_config

    - name: Restart Traefik to reload configuration
      community.docker.docker_compose_v2:
        project_src: "{{ traefik_stack_path }}"
        state: present
        pull: never
        recreate: always
        services:
          - traefik
      register: traefik_restart
      when: remove_config.changed

    - name: Wait for Traefik to be ready
      wait_for:
        port: 443
        host: localhost
        timeout: 30
      delegate_to: localhost
      when: traefik_restart.changed
      ignore_errors: yes

    - name: Check if Gitea docker-compose.yml already has Traefik labels
      shell: |
        grep -q "traefik.enable=true" {{ gitea_stack_path }}/docker-compose.yml && echo "Labels already present" || echo "Labels missing"
      register: labels_check
      ignore_errors: yes
      failed_when: false

    - name: Copy docker-compose.yml from local to ensure labels are present
      copy:
        src: "{{ playbook_dir }}/../../stacks/gitea/docker-compose.yml"
        dest: "{{ gitea_stack_path }}/docker-compose.yml"
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0644'
      register: labels_added
      when: "'Labels missing' in labels_check.stdout"

    - name: Recreate Gitea container with labels
      community.docker.docker_compose_v2:
        project_src: "{{ gitea_stack_path }}"
        state: present
        pull: never
        recreate: always
        remove_orphans: no
      register: gitea_recreate
      when: labels_added.changed

    - name: Wait for Gitea to be healthy
      shell: |
        for i in {1..30}; do
          if docker exec gitea curl -f http://localhost:3000/api/healthz >/dev/null 2>&1; then
            echo "Gitea is healthy"
            exit 0
          fi
          echo "Waiting for Gitea... ($i/30)"
          sleep 2
        done
        echo "Health check timeout"
        exit 1
      args:
        executable: /bin/bash
      register: health_wait
      ignore_errors: yes
      failed_when: false
      when: gitea_recreate.changed

    - name: Display health wait result
      debug:
        msg: "{{ health_wait.stdout_lines }}"
      when: gitea_recreate.changed

    - name: Check Traefik service registration
      shell: |
        sleep 5  # Give Traefik time to discover the service
        docker exec traefik wget -qO- http://localhost:8080/api/http/services 2>&1 | grep -i gitea || echo "Service not found (may take a few seconds)"
      register: traefik_service
      ignore_errors: yes
      failed_when: false

    - name: Display Traefik service registration
      debug:
        msg: "{{ traefik_service.stdout_lines }}"

    - name: Test external Gitea access
      shell: |
        sleep 3  # Give Traefik time to update routing
        curl -k -H "User-Agent: Mozilla/5.0" -s -o /dev/null -w "HTTP Status: %{http_code}\n" https://git.michaelschiemer.de/ 2>&1 || echo "Connection failed"
      args:
        executable: /bin/bash
      register: external_test
      ignore_errors: yes
      failed_when: false

    - name: Display external test result
      debug:
        msg: "{{ external_test.stdout_lines }}"

    - name: Summary
      debug:
        msg:
          - "=== FIX SUMMARY ==="
          - "Dynamic config removed: {{ 'Yes' if remove_config.changed else 'Already removed' }}"
          - "Labels added to docker-compose.yml: {{ 'Yes' if labels_added.changed else 'Already present' }}"
          - "Gitea container recreated: {{ 'Yes' if gitea_recreate.changed else 'No' }}"
          - ""
          - "Gitea should now be accessible via https://git.michaelschiemer.de"
          - "If the issue persists, check the Traefik logs for errors"
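For manual follow-up on the host, the corrected inspect template from the label-check tasks can be run directly:

```bash
# List the Traefik-related labels on the running Gitea container.
docker inspect gitea \
  --format '{{range $k, $v := .Config.Labels}}{{$k}}={{$v}}{{"\n"}}{{end}}' \
  | grep -i traefik
```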
139 deployment/ansible/playbooks/fix-gitea-traefik-labels.yml Normal file
@@ -0,0 +1,139 @@
---
- name: Fix Gitea Traefik Labels
  hosts: production
  gather_facts: yes
  become: no

  vars:
    gitea_stack_path: "{{ stacks_base_path }}/gitea"

  tasks:
    - name: Check current Gitea container status
      shell: |
        cd {{ gitea_stack_path }}
        docker compose ps gitea
      args:
        executable: /bin/bash
      register: gitea_status_before
      ignore_errors: yes
      failed_when: false

    - name: Display current status
      debug:
        msg: "{{ gitea_status_before.stdout_lines }}"

    - name: Check current Traefik labels
      shell: |
        docker inspect gitea --format '{{ '{{' }}range $k, $v := .Config.Labels{{ '}}' }}{{ '{{' }}$k{{ '}}' }}={{ '{{' }}$v{{ '}}' }}{{ '{{' }}"\n"{{ '}}' }}{{ '{{' }}end{{ '}}' }}' 2>&1 | grep -i traefik || echo "No Traefik labels found"
      register: current_labels
      ignore_errors: yes
      failed_when: false

    - name: Display current labels
      debug:
        msg: "{{ current_labels.stdout_lines }}"

    - name: Recreate Gitea container with Traefik labels
      community.docker.docker_compose_v2:
        project_src: "{{ gitea_stack_path }}"
        state: present
        pull: never
        recreate: always
        remove_orphans: no
      register: gitea_recreate

    - name: Wait for Gitea to be ready
      wait_for:
        port: 3000
        host: localhost
        timeout: 60
      delegate_to: localhost
      when: gitea_recreate.changed
      ignore_errors: yes

    - name: Wait for Gitea health check
      shell: |
        for i in {1..30}; do
          if docker exec gitea curl -f http://localhost:3000/api/healthz >/dev/null 2>&1; then
            echo "Gitea is healthy"
            exit 0
          fi
          echo "Waiting for Gitea to be healthy... ($i/30)"
          sleep 2
        done
        echo "Gitea health check timeout"
        exit 1
      args:
        executable: /bin/bash
      register: health_wait
      ignore_errors: yes
      failed_when: false
      when: gitea_recreate.changed

    - name: Display health wait result
      debug:
        msg: "{{ health_wait.stdout_lines }}"
      when: gitea_recreate.changed

    - name: Check new Gitea container status
      shell: |
        cd {{ gitea_stack_path }}
        docker compose ps gitea
      args:
        executable: /bin/bash
      register: gitea_status_after
      ignore_errors: yes
      failed_when: false

    - name: Display new status
      debug:
        msg: "{{ gitea_status_after.stdout_lines }}"

    - name: Check new Traefik labels
      shell: |
        docker inspect gitea --format '{{ '{{' }}range $k, $v := .Config.Labels{{ '}}' }}{{ '{{' }}$k{{ '}}' }}={{ '{{' }}$v{{ '}}' }}{{ '{{' }}"\n"{{ '}}' }}{{ '{{' }}end{{ '}}' }}' 2>&1 | grep -i traefik || echo "No Traefik labels found"
      register: new_labels
      ignore_errors: yes
      failed_when: false

    - name: Display new labels
      debug:
        msg: "{{ new_labels.stdout_lines }}"

    - name: Check Traefik service registration
      shell: |
        docker exec traefik wget -qO- http://localhost:8080/api/http/services 2>&1 | grep -i gitea || echo "Gitea service not found (may take a few seconds to register)"
      register: traefik_service
      ignore_errors: yes
      failed_when: false

    - name: Display Traefik service registration
      debug:
        msg: "{{ traefik_service.stdout_lines }}"

    - name: Test external Gitea access
      shell: |
        echo "Testing external access..."
        sleep 5  # Give Traefik time to update
        curl -k -H "User-Agent: Mozilla/5.0" -s -o /dev/null -w "HTTP Status: %{http_code}\n" https://git.michaelschiemer.de/ 2>&1 || echo "Connection failed"
      args:
        executable: /bin/bash
      register: external_test
      ignore_errors: yes
      failed_when: false

    - name: Display external test result
      debug:
        msg: "{{ external_test.stdout_lines }}"

    - name: Summary
      debug:
        msg:
          - "=== FIX SUMMARY ==="
          - "Container recreated: {{ 'Yes' if gitea_recreate.changed else 'No' }}"
          - "Traefik labels: {{ 'Fixed' if 'traefik' in new_labels.stdout | lower else 'Still missing' }}"
          - ""
          - "If the issue persists:"
          - "1. Check Traefik logs: cd {{ stacks_base_path }}/traefik && docker compose logs traefik"
          - "2. Verify Traefik can reach Gitea: docker exec traefik ping -c 2 gitea"
          - "3. Check Gitea logs for errors: cd {{ gitea_stack_path }} && docker compose logs gitea"
@@ -10,6 +10,7 @@
     wireguard_config_file: "{{ wireguard_config_path }}/{{ wireguard_interface }}.conf"
     wireguard_client_configs_path: "/etc/wireguard/clients"
     wireguard_local_client_configs_dir: "{{ playbook_dir }}/../wireguard-clients"
+    wireguard_dns_servers: []

   tasks:
     - name: Validate client name
@@ -80,18 +81,19 @@

     - name: Extract server IP from config
       set_fact:
-        server_vpn_ip: "{{ (wireguard_server_config_read.content | b64decode | regex_search('Address = ([0-9.]+)')) | default(['10.8.0.1']) | first }}"
+        server_vpn_ip: "{{ (wireguard_server_config_read.content | b64decode | regex_search('Address = ([0-9.]+)', '\\1')) | first | default('10.8.0.1') }}"
       failed_when: false

     - name: Set default DNS servers
       set_fact:
         wireguard_dns_servers: "{{ [server_vpn_ip] }}"

     - name: Extract WireGuard server IP octets
       set_fact:
-        wireguard_server_ip_octets: "{{ server_vpn_ip.split('.') }}"
+        wireguard_server_ip_octets: "{{ (server_vpn_ip | default('')).split('.') }}"
+      when: client_ip == ""

     - name: Fail if server VPN IP is invalid
       fail:
         msg: "Server VPN IP '{{ server_vpn_ip }}' is invalid - please check wg0.conf."
       when: client_ip == "" and (wireguard_server_ip_octets | length) < 4

     - name: Gather existing client addresses
       set_fact:
         existing_client_ips: "{{ wireguard_server_config_read.content | b64decode | regex_findall('AllowedIPs = ([0-9A-Za-z.]+)/32') }}"
@@ -109,7 +111,7 @@
           wireguard_server_ip_octets[2],
           next_octet_candidate
         ] | join('.') }}"
-      when: client_ip == ""
+      when: client_ip == "" and (wireguard_server_ip_octets | length) >= 4

     - name: Generate NEW client private key
       command: "wg genkey"
@@ -35,45 +35,37 @@
         file: "{{ vault_file }}"
       no_log: yes

-    - name: Ensure secrets directory exists
+    - name: Ensure secrets directory exists for Docker Compose secrets
       file:
-        path: "{{ secrets_path }}"
+        path: "{{ app_stack_path }}/secrets"
         state: directory
         owner: "{{ ansible_user }}"
         group: "{{ ansible_user }}"
         mode: '0700'

-    - name: Create .env.production file
-      template:
-        src: "{{ playbook_dir }}/../templates/.env.production.j2"
-        dest: "{{ secrets_path }}/.env.production"
+    - name: Create Docker Compose secret files from vault
+      copy:
+        content: "{{ item.value }}"
+        dest: "{{ app_stack_path }}/secrets/{{ item.name }}.txt"
         owner: "{{ ansible_user }}"
         group: "{{ ansible_user }}"
         mode: '0600'
+      no_log: yes

     - name: Create Docker secrets from vault (disabled for compose-only deployment)
       docker_secret:
         name: "{{ item.name }}"
         data: "{{ item.value }}"
         state: present
       loop:
-        - name: db_password
+        - name: db_user_password
           value: "{{ vault_db_password }}"
         - name: redis_password
           value: "{{ vault_redis_password }}"
         - name: app_key
           value: "{{ vault_app_key }}"
         - name: jwt_secret
           value: "{{ vault_jwt_secret }}"
         - name: mail_password
           value: "{{ vault_mail_password }}"
         - name: vault_encryption_key
           value: "{{ vault_encryption_key | default(vault_app_key) }}"
         - name: git_token
           value: "{{ vault_git_token | default('') }}"
       no_log: yes
+      when: false

     - name: Set secure permissions on secrets directory
       file:
-        path: "{{ secrets_path }}"
+        path: "{{ app_stack_path }}/secrets"
         state: directory
         owner: "{{ ansible_user }}"
         group: "{{ ansible_user }}"
@@ -1,6 +1,8 @@
 ---
 # Source path for application stack files on the control node
-application_stack_src: "{{ role_path }}/../../stacks/application"
+# Use playbook_dir as base, then go to ../stacks/application
+# This assumes playbooks are in deployment/ansible/playbooks
+application_stack_src: "{{ playbook_dir | default(role_path + '/..') }}/../stacks/application"

 # Destination path on the target host (defaults to configured app_stack_path)
 application_stack_dest: "{{ app_stack_path | default(stacks_base_path + '/application') }}"
@@ -10,7 +10,7 @@

 - name: Wait for application container to report Up
   shell: |
-    docker compose -f {{ application_stack_dest }}/docker-compose.yml ps app | grep -Eiq "Up|running"
+    docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.production.yml ps php | grep -Eiq "Up|running"
   register: application_app_running
   changed_when: false
   until: application_app_running.rc == 0
@@ -20,7 +20,7 @@

 - name: Ensure app container is running before migrations
   shell: |
-    docker compose -f {{ application_stack_dest }}/docker-compose.yml ps app | grep -Eiq "Up|running"
+    docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.production.yml ps php | grep -Eiq "Up|running"
   args:
     executable: /bin/bash
   register: application_app_container_running
@@ -30,7 +30,7 @@

 - name: Run database migrations
   shell: |
-    docker compose -f {{ application_stack_dest }}/docker-compose.yml exec -T app {{ application_migration_command }}
+    docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.production.yml exec -T php {{ application_migration_command }}
   args:
     executable: /bin/bash
   register: application_migration_result
@@ -43,7 +43,7 @@
     - application_app_container_running.rc == 0

 - name: Collect application container status
-  shell: docker compose -f {{ application_stack_dest }}/docker-compose.yml ps
+  shell: docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.production.yml ps
   register: application_ps
   changed_when: false
   ignore_errors: yes
@@ -17,6 +17,7 @@
         file: "{{ application_vault_file }}"
       when: application_vault_stat.stat.exists
       no_log: yes
+      ignore_errors: yes
       delegate_to: localhost
       become: no
@@ -72,21 +73,57 @@
       application_encryption_key: "{{ encryption_key | default(vault_encryption_key | default('')) }}"
       no_log: yes

-    - name: Check if application docker-compose source exists locally
+    - name: Check if application docker-compose.base.yml source exists locally
       stat:
-        path: "{{ application_stack_src }}/docker-compose.yml"
+        path: "{{ application_stack_src }}/docker-compose.base.yml"
       delegate_to: localhost
-      register: application_compose_src
+      register: application_compose_base_src
       become: no

+    - name: Check if application docker-compose.production.yml source exists locally
+      stat:
+        path: "{{ application_stack_src }}/../../../docker-compose.production.yml"
+      delegate_to: localhost
+      register: application_compose_prod_src
+      become: no
+
+    - name: Copy application docker-compose.base.yml to target host
+      copy:
+        src: "{{ application_stack_src }}/docker-compose.base.yml"
+        dest: "{{ application_stack_dest }}/docker-compose.base.yml"
+        owner: "{{ ansible_user }}"
+        group: "{{ ansible_user }}"
+        mode: '0644'
+      when: application_compose_base_src.stat.exists
+
+    - name: Copy application docker-compose.production.yml to target host
+      copy:
+        src: "{{ application_stack_src }}/../../../docker-compose.production.yml"
+        dest: "{{ application_stack_dest }}/docker-compose.production.yml"
+        owner: "{{ ansible_user }}"
+        group: "{{ ansible_user }}"
+        mode: '0644'
+      when: application_compose_prod_src.stat.exists
+
+    - name: Check if legacy docker-compose.yml exists (fallback)
+      stat:
+        path: "{{ application_stack_src }}/docker-compose.yml"
+      delegate_to: localhost
+      register: application_compose_src
+      become: no
+      when: not (application_compose_base_src.stat.exists | default(false))

-    - name: Copy application docker-compose to target host
+    - name: Copy application docker-compose.yml to target host (fallback for legacy)
       copy:
         src: "{{ application_stack_src }}/docker-compose.yml"
         dest: "{{ application_stack_dest }}/docker-compose.yml"
         owner: "{{ ansible_user }}"
         group: "{{ ansible_user }}"
         mode: '0644'
-      when: application_compose_src.stat.exists
+      when:
+        - application_compose_src is defined
+        - application_compose_src.stat.exists | default(false)
+        - not (application_compose_base_src.stat.exists | default(false))

     - name: Check if nginx configuration exists locally
       stat:
@@ -11,7 +11,9 @@ APP_DOMAIN={{ app_domain }}
 APP_ENV={{ app_env | default('production') }}
 APP_DEBUG={{ app_debug | default('false') }}
 APP_NAME={{ app_display_name | default(app_name | default('Framework') | replace('-', ' ') | title) }}
-APP_KEY={{ app_key }}
+# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
+# APP_KEY is loaded from /run/secrets/app_key via APP_KEY_FILE
+APP_KEY_FILE=/run/secrets/app_key
 APP_TIMEZONE={{ app_timezone | default(timezone | default('Europe/Berlin')) }}
 APP_LOCALE={{ app_locale | default('de') }}
 APP_URL=https://{{ app_domain }}
@@ -25,18 +27,22 @@ DB_HOST={{ db_host | default('postgres') }}
 DB_PORT={{ db_port | default('5432') }}
 DB_DATABASE={{ db_name | default(db_name_default) }}
 DB_USERNAME={{ db_user | default(db_user_default) }}
-DB_PASSWORD={{ db_password }}
+# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
+# DB_PASSWORD is loaded from /run/secrets/db_user_password via DB_PASSWORD_FILE
+DB_PASSWORD_FILE=/run/secrets/db_user_password
 DB_CHARSET={{ db_charset | default('utf8') }}
 # Legacy variables (kept for backward compatibility)
 DB_NAME={{ db_name | default(db_name_default) }}
 DB_USER={{ db_user | default(db_user_default) }}
-DB_PASS={{ db_password }}
+# DB_PASS is loaded from Docker Secret via DB_PASSWORD_FILE

 # Redis Configuration
 # Redis runs in this stack
 REDIS_HOST={{ redis_host | default('redis') }}
 REDIS_PORT={{ redis_port | default('6379') }}
-REDIS_PASSWORD={{ redis_password }}
+# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
+# REDIS_PASSWORD is loaded from /run/secrets/redis_password via REDIS_PASSWORD_FILE
+REDIS_PASSWORD_FILE=/run/secrets/redis_password

 # Security Configuration
 SECURITY_ALLOWED_HOSTS={{ security_allowed_hosts | default('localhost,' ~ app_domain ~ ',www.' ~ app_domain) }}
@@ -59,11 +65,17 @@ QUEUE_WORKER_TRIES={{ queue_worker_tries | default('3') }}
 QUEUE_WORKER_TIMEOUT={{ queue_worker_timeout | default('60') }}

 # Vault / Encryption
-VAULT_ENCRYPTION_KEY={{ encryption_key }}
+# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
+# VAULT_ENCRYPTION_KEY is loaded from /run/secrets/vault_encryption_key via VAULT_ENCRYPTION_KEY_FILE
+VAULT_ENCRYPTION_KEY_FILE=/run/secrets/vault_encryption_key
+# APP_KEY is loaded from /run/secrets/app_key via APP_KEY_FILE
+APP_KEY_FILE=/run/secrets/app_key

 # Git Repository Configuration (optional - if set, container will clone/pull code on start)
 GIT_REPOSITORY_URL={{ git_repository_url | default('') }}
 GIT_BRANCH={{ git_branch | default('main') }}
-GIT_TOKEN={{ git_token | default('') }}
+# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
+# GIT_TOKEN is loaded from /run/secrets/git_token via GIT_TOKEN_FILE
+GIT_TOKEN_FILE=/run/secrets/git_token
 GIT_USERNAME={{ git_username | default('') }}
 GIT_PASSWORD={{ git_password | default('') }}
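The `*_FILE` convention referenced throughout this template means the application reads a secret from the named file (typically a `/run/secrets` mount) instead of from the environment. The framework's EncryptedEnvLoader handles this internally; purely as an illustration of the convention (a hypothetical shell helper, not the framework's code), the resolution logic looks like this:

```bash
# Prefer VAR_FILE over VAR: if VAR_FILE points at a readable file, export
# VAR with the file's contents (the pattern used by /run/secrets mounts).
file_env() {
  local var="$1" file_var="${1}_FILE"
  if [ -n "${!file_var:-}" ] && [ -f "${!file_var}" ]; then
    export "$var"="$(cat "${!file_var}")"
  fi
}
file_env DB_PASSWORD
file_env REDIS_PASSWORD
file_env APP_KEY
```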
@@ -8,8 +8,10 @@ PrivateKey = {{ client_private_key.stdout }}
 # Client IP address in VPN network
 Address = {{ client_ip }}/24

-# DNS server (VPN internal resolver)
+{% if wireguard_dns_servers | length > 0 %}
+# DNS servers provided via Ansible (optional)
 DNS = {{ wireguard_dns_servers | join(', ') }}
+{% endif %}

 [Peer]
 # Server public key
213 deployment/stacks/application/docker-compose.base.yml Normal file
@@ -0,0 +1,213 @@
# Base Docker Compose Configuration
# This file contains shared service definitions, networks, and volumes.
# Use with environment-specific override files:
#   - docker-compose.local.yml (local development)
#   - docker-compose.staging.yml (staging environment)
#   - docker-compose.production.yml (production environment)
#
# Usage:
#   Local:      docker-compose -f docker-compose.base.yml -f docker-compose.local.yml up
#   Staging:    docker-compose -f docker-compose.base.yml -f docker-compose.staging.yml up
#   Production: docker-compose -f docker-compose.base.yml -f docker-compose.production.yml up

services:
  web:
    build:
      context: docker/nginx
      dockerfile: Dockerfile
    healthcheck:
      test: ["CMD", "nc", "-z", "127.0.0.1", "443"]
      interval: 30s
      timeout: 10s
      retries: 3
    depends_on:
      php:
        condition: service_started
    networks:
      - frontend
      - backend

  php:
    build:
      context: .
      dockerfile: docker/php/Dockerfile
      args:
        - ENV=${APP_ENV:-dev}
        - COMPOSER_INSTALL_FLAGS=${COMPOSER_INSTALL_FLAGS:---no-scripts --no-autoloader}
    healthcheck:
      test: ["CMD", "php", "-v"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - backend
      - cache
    volumes:
      # Shared volume so the Composer cache survives container restarts
      - composer-cache:/root/.composer/cache
      # Docker volumes for performance (no host sync required)
      - storage-cache:/var/www/html/storage/cache:rw
      - storage-queue:/var/www/html/storage/queue:rw
      - storage-discovery:/var/www/html/storage/discovery:rw
      - var-data:/var/www/html/var:rw

  php-test:
    build:
      context: .
      dockerfile: docker/php/Dockerfile.test
    user: "1000:1000"
    profiles:
      - test
    volumes:
      - composer-cache:/home/appuser/.composer/cache
      - storage-cache:/var/www/html/storage/cache:rw
      - storage-queue:/var/www/html/storage/queue:rw
      - storage-discovery:/var/www/html/storage/discovery:rw
      - var-data:/var/www/html/var:rw
    environment:
      APP_ENV: testing
      APP_DEBUG: true
      DB_HOST: db
      REDIS_HOST: redis
    networks:
      - backend
      - cache
    entrypoint: []
    command: ["php", "-v"]

  db:
    image: postgres:16-alpine
    environment:
      POSTGRES_DB: ${DB_DATABASE:-michaelschiemer}
      POSTGRES_USER: ${DB_USERNAME:-postgres}
      # SECURITY: POSTGRES_PASSWORD must be set explicitly (no hardcoded fallback)
      # Set DB_PASSWORD in .env.local for local development
      # Use Docker Secrets in production/staging via DB_PASSWORD_FILE
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      # Performance & connection settings
      POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
      PGDATA: /var/lib/postgresql/data/pgdata
    volumes:
      - db_data:/var/lib/postgresql/data
      - "${DB_CONFIG_PATH:-./docker/postgres/postgresql.conf}:/etc/postgresql/postgresql.conf:ro"
      - "${DB_INIT_PATH:-./docker/postgres/init}:/docker-entrypoint-initdb.d:ro"
    command:
      - "postgres"
      - "-c"
      - "config_file=/etc/postgresql/postgresql.conf"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USERNAME:-postgres} -d ${DB_DATABASE:-michaelschiemer}"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    networks:
      - backend

  redis:
    image: redis:7-alpine
    volumes:
      - "${REDIS_CONFIG_PATH:-./docker/redis/redis.conf}:/usr/local/etc/redis/redis.conf:ro"
      - redis_data:/data
    command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s
    networks:
      - cache

  queue-worker:
    build:
      context: .
      dockerfile: docker/worker/Dockerfile
    entrypoint: ""  # Override any entrypoint
    command: ["php", "/var/www/html/worker.php"]  # Direct command execution
    depends_on:
      php:
        condition: service_healthy
      redis:
        condition: service_healthy
      db:
        condition: service_healthy
    volumes:
      # Use the same storage volumes as the PHP container for consistency
      - storage-cache:/var/www/html/storage/cache:rw
      - storage-queue:/var/www/html/storage/queue:rw
      - storage-discovery:/var/www/html/storage/discovery:rw
      - var-data:/var/www/html/var:rw
    networks:
      - backend
      - cache
    # Graceful shutdown timeout
    stop_grace_period: 30s

  minio:
    image: minio/minio:latest
    environment:
      - TZ=Europe/Berlin
      # SECURITY: MinIO credentials must be set explicitly (no hardcoded fallback)
      # Set MINIO_ROOT_USER and MINIO_ROOT_PASSWORD in .env.local for local development
      # Use Docker Secrets in production/staging for production deployments
      - MINIO_ROOT_USER=${MINIO_ROOT_USER}
      - MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}
    command: server /data --console-address ":9001"
    volumes:
      - minio_data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
    networks:
      - backend

networks:
  frontend:
    driver: bridge
  backend:
    driver: bridge
  cache:
    driver: bridge

volumes:
  redis_data:
  composer-cache:
  storage-cache:      # Cache directory (performance-critical)
  storage-queue:      # Queue directory (performance-critical)
  storage-discovery:  # Discovery cache (framework-internal)
  var-data:
  db_data:
  project-data:
  worker-logs:
  worker-queue:
  worker-storage:     # Completely separate storage for the worker with correct permissions
  minio_data:         # MinIO object storage data

# Docker Secrets Configuration
# Secrets are defined here but activated in environment-specific override files
secrets:
  db_root_password:
    file: ./secrets/db_root_password.txt
    external: false
  db_user_password:
    file: ./secrets/db_user_password.txt
    external: false
  redis_password:
    file: ./secrets/redis_password.txt
    external: false
  app_key:
    file: ./secrets/app_key.txt
    external: false
  vault_encryption_key:
    file: ./secrets/vault_encryption_key.txt
    external: false
  git_token:
    file: ./secrets/git_token.txt
    external: false
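In this deployment the Ansible secrets playbook writes these files from the vault. For a standalone local run, something along these lines creates the files the `secrets:` block above references (filenames taken from that block; the values here are just freshly generated examples):

```bash
# Create the secret files expected by the secrets: block, one value per file.
mkdir -p secrets && chmod 700 secrets
for s in db_root_password db_user_password redis_password app_key vault_encryption_key git_token; do
  head -c32 /dev/urandom | base64 > "secrets/${s}.txt"
done
chmod 600 secrets/*.txt
```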
@@ -25,11 +25,16 @@ services:
       - DB_PORT=${DB_PORT:-5432}
       - DB_DATABASE=${DB_DATABASE}
       - DB_USERNAME=${DB_USERNAME}
-      - DB_PASSWORD=${DB_PASSWORD}
+      # Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
+      - DB_PASSWORD_FILE=/run/secrets/db_user_password
       # Redis
       - REDIS_HOST=redis
       - REDIS_PORT=6379
-      - REDIS_PASSWORD=${REDIS_PASSWORD}
+      # Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
+      - REDIS_PASSWORD_FILE=/run/secrets/redis_password
+    secrets:
+      - db_user_password
+      - redis_password
       # Cache
       - CACHE_DRIVER=redis
       - CACHE_PREFIX=${CACHE_PREFIX:-app}
@@ -181,22 +186,24 @@ services:
       - app-internal
     environment:
       - TZ=Europe/Berlin
+    secrets:
+      - redis_password
     command: >
-      redis-server
-      --requirepass ${REDIS_PASSWORD}
+      sh -c "redis-server
+      --requirepass $$(cat /run/secrets/redis_password)
       --maxmemory 512mb
       --maxmemory-policy allkeys-lru
       --save 900 1
       --save 300 10
       --save 60 10000
       --appendonly yes
-      --appendfsync everysec
+      --appendfsync everysec"
     volumes:
       - redis-data:/data
       - /etc/timezone:/etc/timezone:ro
       - /etc/localtime:/etc/localtime:ro
     healthcheck:
-      test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
+      test: ["CMD", "sh", "-c", "redis-cli --no-auth-warning -a $$(cat /run/secrets/redis_password) ping"]
       interval: 30s
       timeout: 10s
       retries: 3
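The doubled `$$` is Compose variable escaping: it leaves a literal `$` in the command, so the `$(cat /run/secrets/redis_password)` substitution runs in the container's shell at start time rather than being interpolated by Compose. The rendered config makes this visible:

```bash
# The rendered command should show a single literal $(cat ...), confirming
# the password is read inside the container, not by Compose.
docker compose -f docker-compose.base.yml -f docker-compose.production.yml config | grep requirepass
```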
@@ -218,11 +225,16 @@ services:
       - DB_PORT=${DB_PORT:-5432}
       - DB_DATABASE=${DB_DATABASE}
       - DB_USERNAME=${DB_USERNAME}
-      - DB_PASSWORD=${DB_PASSWORD}
+      # Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
+      - DB_PASSWORD_FILE=/run/secrets/db_user_password
       # Redis
       - REDIS_HOST=redis
       - REDIS_PORT=6379
-      - REDIS_PASSWORD=${REDIS_PASSWORD}
+      # Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
+      - REDIS_PASSWORD_FILE=/run/secrets/redis_password
+    secrets:
+      - db_user_password
+      - redis_password
       # Queue
       - QUEUE_DRIVER=redis
       - QUEUE_CONNECTION=default
@@ -234,6 +246,9 @@ services:
       - app-logs:/var/www/html/storage/logs
       - /etc/timezone:/etc/timezone:ro
       - /etc/localtime:/etc/localtime:ro
+    secrets:
+      - db_user_password
+      - redis_password
     command: php console.php queue:work --queue=default --timeout=${QUEUE_WORKER_TIMEOUT:-60}
     healthcheck:
       test: ["CMD-SHELL", "php -r 'exit(0);' && test -f /var/www/html/console.php || exit 1"]
@@ -263,11 +278,16 @@ services:
       - DB_PORT=${DB_PORT:-5432}
       - DB_DATABASE=${DB_DATABASE}
       - DB_USERNAME=${DB_USERNAME}
-      - DB_PASSWORD=${DB_PASSWORD}
+      # Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
+      - DB_PASSWORD_FILE=/run/secrets/db_user_password
       # Redis
       - REDIS_HOST=redis
       - REDIS_PORT=6379
-      - REDIS_PASSWORD=${REDIS_PASSWORD}
+      # Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
+      - REDIS_PASSWORD_FILE=/run/secrets/redis_password
+    secrets:
+      - db_user_password
+      - redis_password
     volumes:
       - app-storage:/var/www/html/storage
       - app-logs:/var/www/html/storage/logs
@@ -300,6 +320,12 @@ volumes:
     name: redis-data
     external: true

+secrets:
+  db_user_password:
+    file: ./secrets/db_user_password.txt
+  redis_password:
+    file: ./secrets/redis_password.txt
+
 networks:
   traefik-public:
     external: true
48 deployment/stacks/semaphore/.env.example Normal file
@@ -0,0 +1,48 @@
# Semaphore CI Stack - Environment Configuration
# Copy this file to .env and adjust values as needed

# ============================================
# MySQL Database Configuration
# ============================================
MYSQL_ROOT_PASSWORD=semaphore_root
MYSQL_DATABASE=semaphore
MYSQL_USER=semaphore
MYSQL_PASSWORD=semaphore

# ============================================
# Semaphore Configuration
# ============================================

# Port binding (default: 3001; port 3000 is already used by Gitea)
# Only accessible via localhost (127.0.0.1)
SEMAPHORE_PORT=3001

# Admin User Configuration
SEMAPHORE_ADMIN=admin
SEMAPHORE_ADMIN_NAME=Administrator
SEMAPHORE_ADMIN_EMAIL=admin@localhost
SEMAPHORE_ADMIN_PASSWORD=admin

# Playbook Storage Path (inside container)
SEMAPHORE_PLAYBOOK_PATH=/tmp/semaphore

# Access Key Encryption
# Generate with: head -c32 /dev/urandom | base64
# IMPORTANT: Change this in production!
SEMAPHORE_ACCESS_KEY_ENCRYPTION=change-me-in-production

# ============================================
# Optional: LDAP Configuration
# ============================================
# SEMAPHORE_LDAP_ENABLED=false
# SEMAPHORE_LDAP_HOST=ldap.example.com
# SEMAPHORE_LDAP_PORT=389
# SEMAPHORE_LDAP_DN=cn=admin,dc=example,dc=com
# SEMAPHORE_LDAP_PASSWORD=ldap_password
# SEMAPHORE_LDAP_BASE_DN=dc=example,dc=com
# SEMAPHORE_LDAP_USER_FILTER=(uid=%s)

# ============================================
# Optional: Webhook Configuration
# ============================================
# SEMAPHORE_WEBHOOK_URL=http://localhost:8080/webhook
556 deployment/stacks/semaphore/README.md Normal file
@@ -0,0 +1,556 @@
|
||||
# Semaphore CI Stack - Lokale Entwicklung
|
||||
|
||||
## Übersicht
|
||||
|
||||
Selbst-gehostete Semaphore CI/CD-Plattform für lokale Entwicklung, die es ermöglicht, CI/CD-Pipelines und Ansible-Playbooks lokal zu testen und auszuführen, ohne Abhängigkeit von externen CI-Services.
|
||||
|
||||
**Features**:
|
||||
- **Selbst-gehostet**: Läuft vollständig lokal auf dem Entwicklungsrechner
|
||||
- **Isoliert**: Keine externen Zugriffe, nur localhost (127.0.0.1)
|
||||
- **MySQL-Backend**: Persistente Datenbank für Projekte, Tasks und Templates
|
||||
- **Web-UI**: Intuitive Benutzeroberfläche für Pipeline-Management
|
||||
- **Ansible-Integration**: Native Unterstützung für Ansible-Playbooks
|
||||
- **Docker-basiert**: Einfaches Setup und Wartung
|
||||
|
||||
**Einsatzzweck**:
|
||||
- Lokales Testen von CI/CD-Pipelines
|
||||
- Entwicklung und Test von Ansible-Playbooks
|
||||
- Experimentieren mit Deployment-Workflows
|
||||
- Keine Abhängigkeit von externen CI-Services
|
||||
|
||||
## Services
|
||||
|
||||
- **mysql** - MySQL 8.0 Datenbank für Semaphore-Daten
|
||||
- **semaphore** - Semaphore CI/CD Web-UI und API
|
||||
|
||||
## Voraussetzungen
|
||||
|
||||
- Docker und Docker Compose installiert
|
||||
- Port 3001 auf localhost frei verfügbar (3000 wird von Gitea verwendet)
|
||||
- Ausreichend Speicherplatz für Docker Volumes (~500MB initial)
|
||||
|
||||
## Verzeichnisstruktur
|
||||
|
||||
```
|
||||
semaphore/
|
||||
├── docker-compose.yml # Service-Definitionen
|
||||
├── env.example # Environment-Variablen Template
|
||||
├── .env # Environment-Konfiguration (aus env.example erstellen)
|
||||
└── README.md # Diese Datei
|
||||
```
|
||||
|
||||
## Quick Start

### 1. Create the environment file

```bash
cd deployment/stacks/semaphore
cp env.example .env
```

### 2. Adjust the configuration (optional)

Edit `.env` and adjust the values:

```bash
nano .env
```

**Important**: Generate a secure encryption key:

```bash
# Linux/WSL
head -c32 /dev/urandom | base64
```

```powershell
# Windows PowerShell
$bytes = New-Object byte[] 32
(New-Object System.Security.Cryptography.RNGCryptoServiceProvider).GetBytes($bytes)
[Convert]::ToBase64String($bytes)
```

Update `SEMAPHORE_ACCESS_KEY_ENCRYPTION` in the `.env` file.
### 3. Start the stack

```bash
docker compose up -d
```

### 4. Open the Semaphore web UI

Open in your browser: http://localhost:3001

**Default login**:
- **Username**: `admin` (or the value of `SEMAPHORE_ADMIN`)
- **Password**: `admin` (or the value of `SEMAPHORE_ADMIN_PASSWORD`)
### 5. First steps in Semaphore

1. **Create a project**: Click "New Project" and create a new project
2. **Add an inventory**: Create an inventory with local hosts or Docker containers
3. **Create a template**: Create a template that references an Ansible playbook
4. **Run a task**: Start a task and watch it execute
## Configuration

### Environment Variables (.env)

#### MySQL Database

```env
MYSQL_ROOT_PASSWORD=semaphore_root
MYSQL_DATABASE=semaphore
MYSQL_USER=semaphore
MYSQL_PASSWORD=semaphore
```

#### Semaphore Configuration

```env
# Port binding (default: 3001)
SEMAPHORE_PORT=3001

# Admin user
SEMAPHORE_ADMIN=admin
SEMAPHORE_ADMIN_NAME=Administrator
SEMAPHORE_ADMIN_EMAIL=admin@localhost
SEMAPHORE_ADMIN_PASSWORD=admin

# Playbook path (inside the container)
SEMAPHORE_PLAYBOOK_PATH=/tmp/semaphore

# Encryption key (IMPORTANT: change for production!)
SEMAPHORE_ACCESS_KEY_ENCRYPTION=change-me-in-production
```

#### Optional Configuration

**LDAP integration** (disabled by default):
```env
SEMAPHORE_LDAP_ENABLED=true
SEMAPHORE_LDAP_HOST=ldap.example.com
SEMAPHORE_LDAP_PORT=389
SEMAPHORE_LDAP_DN=cn=admin,dc=example,dc=com
SEMAPHORE_LDAP_PASSWORD=ldap_password
SEMAPHORE_LDAP_BASE_DN=dc=example,dc=com
SEMAPHORE_LDAP_USER_FILTER=(uid=%s)
```

**Webhook integration**:
```env
SEMAPHORE_WEBHOOK_URL=http://localhost:8080/webhook
```
## Usage

### Start the stack

```bash
# Start services in the background
docker compose up -d

# Show logs
docker compose logs -f

# Semaphore logs only
docker compose logs -f semaphore

# MySQL logs only
docker compose logs -f mysql
```

### Stop the stack

```bash
docker compose down
```

### Restart the stack

```bash
docker compose restart
```

### Check status

```bash
# Show container status
docker compose ps

# Health check status
docker compose ps --format "table {{.Name}}\t{{.Status}}"

# Run the Semaphore health check manually
docker compose exec semaphore wget --no-verbose --spider http://localhost:3000/api/health
```

### Database backup

```bash
# Create a MySQL dump (-T disables TTY allocation so the dump is not corrupted)
docker compose exec -T mysql mysqldump -u semaphore -psemaphore semaphore > semaphore-backup-$(date +%Y%m%d).sql

# Restore a backup
docker compose exec -T mysql mysql -u semaphore -psemaphore semaphore < semaphore-backup-YYYYMMDD.sql
```

### Wipe data and start fresh

```bash
# ⚠️ WARNING: Deletes all data!
docker compose down -v
docker compose up -d
```
## Getting Started with Semaphore

### 1. Create a project

1. Open http://localhost:3001 in your browser
2. Log in with the admin credentials
3. Click "New Project"
4. Enter a project name (e.g. "My Project")
5. Click "Create"

### 2. Add an inventory

An inventory defines the hosts that playbooks run against.

**Option A: Local host**
1. Go to Project → Inventories → New Inventory
2. Name: "Local Hosts"
3. Add a host:
   - Name: `localhost`
   - Address: `127.0.0.1`
   - SSH Username: `your-username`
   - SSH Key: Add your private SSH key

**Option B: Docker containers**
1. Create an inventory with Docker hosts (see the sketch below)
2. Docker-in-Docker support requires additional configuration
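As a rough sketch of Option B, an Ansible YAML inventory can target running containers through the `community.docker.docker` connection plugin instead of SSH. The container name here is a placeholder, and the Semaphore container needs the `community.docker` collection installed plus access to the Docker socket:

```yaml
# Hypothetical inventory: reach a container via the Docker socket instead of SSH
all:
  children:
    containers:
      hosts:
        my-app-container:            # placeholder container name
          ansible_connection: community.docker.docker
```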
### 3. Create a template

Templates define which playbooks are executed.

1. Go to Project → Templates → New Template
2. Template name: "Hello World"
3. Inventory: select your inventory
4. Playbook: create a simple playbook:

```yaml
---
- hosts: all
  gather_facts: no
  tasks:
    - name: Print hello world
      debug:
        msg: "Hello from Semaphore CI!"
```

5. Save the template

### 4. Run a task

1. Go to Templates
2. Click your template
3. Click "Run"
4. Watch the execution in real time
## Integration with Existing Stacks

### Using the local Docker registry

Semaphore can use Docker images from the local registry:

**In Ansible playbooks**:
```yaml
- name: Pull image from local registry
  docker_image:
    name: registry.michaelschiemer.de/framework:latest
    source: pull
  register: image_result
```

**Prerequisite**: The Semaphore container needs access to the Docker socket or the registry; one way to provide socket access is sketched below.
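This is a sketch of a compose override, not part of this repository; note that mounting the Docker socket effectively grants root on the host, so it is only appropriate for local development:

```yaml
# docker-compose.override.yml (sketch): give Semaphore access to the host Docker daemon
services:
  semaphore:
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
```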
### Using existing Ansible playbooks

1. Mount your playbooks as a volume:
```yaml
volumes:
  - /path/to/your/playbooks:/tmp/semaphore/playbooks:ro
```

2. Or copy playbooks into the container:
```bash
docker compose exec semaphore mkdir -p /tmp/semaphore/my-playbook
docker cp my-playbook.yml semaphore:/tmp/semaphore/my-playbook/playbook.yml
```

3. Point the template at the playbook path
## Troubleshooting

### Port conflict (port 3000 vs. 3001)

**Problem**: Port 3000 is taken by Gitea by default, so Semaphore uses port 3001.

**Solution**: To use a different port, set `SEMAPHORE_PORT` in the `.env` file:
```env
# Or any other free port
SEMAPHORE_PORT=8080
```

**Important**: The internal container port is always 3000 - only the external host port changes.
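For reference, this is the relevant mapping from this stack's own `docker-compose.yml` (shown in full further below):

```yaml
ports:
  # Only bind to localhost; host port is configurable, container port is fixed
  - "127.0.0.1:${SEMAPHORE_PORT:-3001}:3000"
```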
### Semaphore does not start

**Check the logs**:
```bash
docker compose logs semaphore
```

**Common causes**:
- MySQL is not ready yet (wait for the health check)
- Port 3001 is already in use: `netstat -tuln | grep 3001` (or on Windows: `netstat -ano | findstr :3001`)
- Wrong database credentials

**Solution**:
```bash
# Check MySQL status
docker compose ps mysql

# Check Semaphore logs for DB connection errors
docker compose logs semaphore | grep -i database

# Restart if needed
docker compose restart semaphore
```
### MySQL does not start

**Check the MySQL logs**:
```bash
docker compose logs mysql
```

**Common causes**:
- Volume permission problems
- Port conflicts (unlikely, since no port is mapped)

**Solution**:
```bash
# Inspect the volume
docker volume inspect semaphore-mysql-data

# Clean up and restart (⚠️ data loss!)
docker compose down -v
docker compose up -d
```
### Login does not work

**Default credentials**:
- Username: `admin` (or the `SEMAPHORE_ADMIN` value)
- Password: `admin` (or the `SEMAPHORE_ADMIN_PASSWORD` value)

**Reset the admin password**:
1. Stop Semaphore: `docker compose stop semaphore`
2. Set `SEMAPHORE_ADMIN_PASSWORD` in `.env` to the new password
3. Start Semaphore: `docker compose up -d`
### Playbooks are not found

**Check the playbook path**:
```bash
docker compose exec semaphore ls -la /tmp/semaphore
```

**Solution**:
- Make sure `SEMAPHORE_PLAYBOOK_PATH` is set correctly
- Check that the playbooks are in the right path
- Make sure file permissions are correct

### Health check fails

**Run the health check**:
```bash
docker compose exec semaphore wget --no-verbose --spider http://localhost:3000/api/health
```

**Solution**:
- Wait for the full startup (can take 1-2 minutes)
- Check the logs: `docker compose logs semaphore`
- Restart if needed: `docker compose restart semaphore`
## Security

### Local development (current setup)

- ✅ Localhost-only access (127.0.0.1:3001)
- ✅ Isolated network (no external access)
- ✅ No Traefik integration
- ⚠️ Default passwords (for local development only)

### For production

If you later want to use Semaphore in production:

1. **Strong passwords**: Change all passwords in `.env`
2. **Encryption key**: Generate a secure key:
   ```bash
   head -c32 /dev/urandom | base64
   ```
3. **Traefik integration**: Add Traefik labels for HTTPS (see the sketch after this list)
4. **LDAP/SSO**: Configure external authentication
5. **Backup strategy**: Set up regular MySQL backups
6. **Resource limits**: Add memory/CPU limits
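A minimal sketch of what such labels could look like, assuming Traefik is already running and shares a network with this stack; the hostname, entrypoint (`websecure`), and certificate resolver (`letsencrypt`) are placeholders, not values from this repository:

```yaml
# Hypothetical Traefik labels for the semaphore service
services:
  semaphore:
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.semaphore.rule=Host(`semaphore.example.com`)"
      - "traefik.http.routers.semaphore.entrypoints=websecure"
      - "traefik.http.routers.semaphore.tls.certresolver=letsencrypt"
      - "traefik.http.services.semaphore.loadbalancer.server.port=3000"
```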
## Maintenance

### Regular tasks

**Weekly**:
- Check logs for errors: `docker compose logs --tail=100`
- Check disk space: `docker system df`
- Create a backup if there is important data (see the cron sketch below)

**Monthly**:
- Update images: `docker compose pull && docker compose up -d`
- Clean up old tasks in Semaphore (via the web UI)
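One way to automate the weekly dump is a crontab entry. This is a sketch: the stack path is a placeholder for your actual checkout, and note that `%` must be escaped in crontab:

```bash
# Weekly MySQL dump, Sundays at 03:00; adjust the path to your checkout
0 3 * * 0 cd /path/to/deployment/stacks/semaphore && docker compose exec -T mysql mysqldump -u semaphore -psemaphore semaphore > semaphore-backup-$(date +\%Y\%m\%d).sql
```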
### Updates

```bash
# Pull the latest images
docker compose pull

# Restart with the new images
docker compose up -d

# Check the logs
docker compose logs -f
```

### Cleaning up data

```bash
# Remove old Docker images
docker image prune -a

# Review old volumes
docker volume ls

# ⚠️ Caution: Deletes all Semaphore data!
docker compose down -v
```
## Backup and Restore

### Create a backup

```bash
# MySQL dump (-T disables TTY allocation so the dump is not corrupted)
docker compose exec -T mysql mysqldump \
  -u semaphore -psemaphore semaphore \
  > semaphore-backup-$(date +%Y%m%d-%H%M%S).sql

# Volume backup (complete)
docker run --rm \
  -v semaphore-mysql-data:/data \
  -v $(pwd):/backup \
  alpine tar czf /backup/semaphore-mysql-backup-$(date +%Y%m%d).tar.gz /data
```

### Restore

```bash
# Restore a MySQL dump
docker compose exec -T mysql mysql \
  -u semaphore -psemaphore semaphore \
  < semaphore-backup-YYYYMMDD.sql

# Restore a volume (⚠️ stops the containers)
docker compose down
docker run --rm \
  -v semaphore-mysql-data:/data \
  -v $(pwd):/backup \
  alpine sh -c "cd /data && tar xzf /backup/semaphore-mysql-backup-YYYYMMDD.tar.gz"
docker compose up -d
```
## Performance Tuning

### MySQL tuning

For better performance, you can adjust the MySQL configuration:

1. Create `mysql/conf.d/my.cnf`:
```ini
[mysqld]
innodb_buffer_pool_size = 256M
max_connections = 100
```

2. Mount it in `docker-compose.yml`:
```yaml
volumes:
  - ./mysql/conf.d:/etc/mysql/conf.d:ro
```

### Resource limits

Add limits in `docker-compose.yml`:

```yaml
deploy:
  resources:
    limits:
      memory: 1G
      cpus: '0.5'
```
## Support

### Documentation

- **Semaphore CI Docs**: https://docs.semaphoreui.com/
- **Semaphore GitHub**: https://github.com/semaphoreui/semaphore

### Logs

```bash
# All logs
docker compose logs -f

# Semaphore logs
docker compose logs -f semaphore

# MySQL logs
docker compose logs -f mysql

# Last 100 lines
docker compose logs --tail=100
```

### Health checks

```bash
# Container status
docker compose ps

# Semaphore health
curl http://localhost:3001/api/health

# MySQL health
docker compose exec mysql mysqladmin ping -h localhost -u root -psemaphore_root
```
---

**Setup status**: ✅ Ready for local development

**Next steps**:
1. Run `cp env.example .env`
2. Start with `docker compose up -d`
3. Open http://localhost:3001
4. Log in with the admin credentials
5. Create your first project and template
87
deployment/stacks/semaphore/docker-compose.yml
Normal file
@@ -0,0 +1,87 @@
services:
  # MySQL Database for Semaphore
  mysql:
    image: mysql:8.0
    container_name: semaphore-mysql
    restart: unless-stopped
    networks:
      - semaphore-internal
    environment:
      - TZ=Europe/Berlin
      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD:-semaphore_root}
      - MYSQL_DATABASE=${MYSQL_DATABASE:-semaphore}
      - MYSQL_USER=${MYSQL_USER:-semaphore}
      - MYSQL_PASSWORD=${MYSQL_PASSWORD:-semaphore}
    volumes:
      - semaphore-mysql-data:/var/lib/mysql
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-p${MYSQL_ROOT_PASSWORD:-semaphore_root}"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    command: >
      --default-authentication-plugin=mysql_native_password
      --character-set-server=utf8mb4
      --collation-server=utf8mb4_unicode_ci

  # Semaphore CI/CD Platform
  semaphore:
    image: semaphoreui/semaphore:latest
    container_name: semaphore
    restart: unless-stopped
    depends_on:
      mysql:
        condition: service_healthy
    networks:
      - semaphore-internal
    ports:
      # Only bind to localhost, not external interfaces
      # Default port 3001 to avoid conflict with Gitea (port 3000)
      - "127.0.0.1:${SEMAPHORE_PORT:-3001}:3000"
    environment:
      - TZ=Europe/Berlin
      # Database Configuration
      - SEMAPHORE_DB_DIALECT=mysql
      - SEMAPHORE_DB_HOST=mysql
      - SEMAPHORE_DB_PORT=3306
      - SEMAPHORE_DB=${MYSQL_DATABASE:-semaphore}
      - SEMAPHORE_DB_USER=${MYSQL_USER:-semaphore}
      - SEMAPHORE_DB_PASS=${MYSQL_PASSWORD:-semaphore}
      # Admin Configuration
      - SEMAPHORE_ADMIN=${SEMAPHORE_ADMIN:-admin}
      - SEMAPHORE_ADMIN_NAME=${SEMAPHORE_ADMIN_NAME:-Administrator}
      - SEMAPHORE_ADMIN_EMAIL=${SEMAPHORE_ADMIN_EMAIL:-admin@localhost}
      - SEMAPHORE_ADMIN_PASSWORD=${SEMAPHORE_ADMIN_PASSWORD:-admin}
      # Playbook Path
      - SEMAPHORE_PLAYBOOK_PATH=${SEMAPHORE_PLAYBOOK_PATH:-/tmp/semaphore}
      # Encryption Key (generate with: head -c32 /dev/urandom | base64)
      - SEMAPHORE_ACCESS_KEY_ENCRYPTION=${SEMAPHORE_ACCESS_KEY_ENCRYPTION:-change-me-in-production}
      # Optional: LDAP Configuration (disabled by default)
      - SEMAPHORE_LDAP_ENABLED=${SEMAPHORE_LDAP_ENABLED:-false}
      # Optional: Webhook Configuration
      - SEMAPHORE_WEBHOOK_URL=${SEMAPHORE_WEBHOOK_URL:-}
    volumes:
      - semaphore-data:/etc/semaphore
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

volumes:
  semaphore-mysql-data:
    name: semaphore-mysql-data
  semaphore-data:
    name: semaphore-data

networks:
  semaphore-internal:
    name: semaphore-internal
    driver: bridge
51
deployment/stacks/semaphore/env.example
Normal file
@@ -0,0 +1,51 @@
# Semaphore CI Stack - Environment Configuration
# Copy this file to .env and adjust values as needed
# Note: Rename this file to .env.example if you prefer the standard naming

# ============================================
# MySQL Database Configuration
# ============================================
MYSQL_ROOT_PASSWORD=semaphore_root
MYSQL_DATABASE=semaphore
MYSQL_USER=semaphore
MYSQL_PASSWORD=semaphore

# ============================================
# Semaphore Configuration
# ============================================

# Port binding (default: 3001)
# Only accessible via localhost (127.0.0.1)
# Note: Changed from 3000 to avoid conflict with Gitea
SEMAPHORE_PORT=3001

# Admin User Configuration
SEMAPHORE_ADMIN=admin
SEMAPHORE_ADMIN_NAME=Administrator
SEMAPHORE_ADMIN_EMAIL=admin@localhost
SEMAPHORE_ADMIN_PASSWORD=admin

# Playbook Storage Path (inside container)
SEMAPHORE_PLAYBOOK_PATH=/tmp/semaphore

# Access Key Encryption
# Generate with: head -c32 /dev/urandom | base64
# IMPORTANT: Change this in production!
SEMAPHORE_ACCESS_KEY_ENCRYPTION=change-me-in-production

# ============================================
# Optional: LDAP Configuration
# ============================================
# SEMAPHORE_LDAP_ENABLED=false
# SEMAPHORE_LDAP_HOST=ldap.example.com
# SEMAPHORE_LDAP_PORT=389
# SEMAPHORE_LDAP_DN=cn=admin,dc=example,dc=com
# SEMAPHORE_LDAP_PASSWORD=ldap_password
# SEMAPHORE_LDAP_BASE_DN=dc=example,dc=com
# SEMAPHORE_LDAP_USER_FILTER=(uid=%s)

# ============================================
# Optional: Webhook Configuration
# ============================================
# SEMAPHORE_WEBHOOK_URL=http://localhost:8080/webhook