fix: Gitea Traefik routing and connection pool optimization
Some checks failed
🚀 Build & Deploy Image / Determine Build Necessity (push) Failing after 10m14s
🚀 Build & Deploy Image / Build Runtime Base Image (push) Has been skipped
🚀 Build & Deploy Image / Build Docker Image (push) Has been skipped
🚀 Build & Deploy Image / Run Tests & Quality Checks (push) Has been skipped
🚀 Build & Deploy Image / Auto-deploy to Staging (push) Has been skipped
🚀 Build & Deploy Image / Auto-deploy to Production (push) Has been skipped
Security Vulnerability Scan / Check for Dependency Changes (push) Failing after 11m25s
Security Vulnerability Scan / Composer Security Audit (push) Has been cancelled

- Remove middleware reference from Gitea Traefik labels (caused routing issues)
- Optimize Gitea connection pool settings (MAX_IDLE_CONNS=30, authentication_timeout=180s)
- Add explicit service reference in Traefik labels
- Fix intermittent 504 timeouts by improving PostgreSQL connection handling

Fixes Gitea unreachability via git.michaelschiemer.de
2025-11-09 14:46:15 +01:00
parent 85c369e846
commit 36ef2a1e2c
1366 changed files with 104925 additions and 28719 deletions

View File

@@ -1,213 +0,0 @@
# Base Docker Compose Configuration
# This file contains shared service definitions, networks, and volumes.
# Use with environment-specific override files:
# - docker-compose.local.yml (local development)
# - docker-compose.staging.yml (staging environment)
# - docker-compose.production.yml (production environment)
#
# Usage:
# Local: docker-compose -f docker-compose.base.yml -f docker-compose.local.yml up
# Staging: docker-compose -f docker-compose.base.yml -f docker-compose.staging.yml up
# Production: docker-compose -f docker-compose.base.yml -f docker-compose.production.yml up
services:
web:
build:
context: docker/nginx
dockerfile: Dockerfile
healthcheck:
test: ["CMD", "nc", "-z", "127.0.0.1", "443"]
interval: 30s
timeout: 10s
retries: 3
depends_on:
php:
condition: service_started
networks:
- frontend
- backend
php:
build:
context: .
dockerfile: docker/php/Dockerfile
args:
- ENV=${APP_ENV:-dev}
- COMPOSER_INSTALL_FLAGS=${COMPOSER_INSTALL_FLAGS:---no-scripts --no-autoloader}
healthcheck:
test: [ "CMD", "php", "-v" ]
interval: 30s
timeout: 10s
retries: 3
networks:
- backend
- cache
volumes:
# Shared volume for the Composer cache, persisted across container restarts
- composer-cache:/root/.composer/cache
# Docker volumes for performance (no host sync needed)
- storage-cache:/var/www/html/storage/cache:rw
- storage-queue:/var/www/html/storage/queue:rw
- storage-discovery:/var/www/html/storage/discovery:rw
- var-data:/var/www/html/var:rw
php-test:
build:
context: .
dockerfile: docker/php/Dockerfile.test
user: "1000:1000"
profiles:
- test
volumes:
- composer-cache:/home/appuser/.composer/cache
- storage-cache:/var/www/html/storage/cache:rw
- storage-queue:/var/www/html/storage/queue:rw
- storage-discovery:/var/www/html/storage/discovery:rw
- var-data:/var/www/html/var:rw
environment:
APP_ENV: testing
APP_DEBUG: true
DB_HOST: db
REDIS_HOST: redis
networks:
- backend
- cache
entrypoint: []
command: ["php", "-v"]
db:
image: postgres:16-alpine
environment:
POSTGRES_DB: ${DB_DATABASE:-michaelschiemer}
POSTGRES_USER: ${DB_USERNAME:-postgres}
# SECURITY: POSTGRES_PASSWORD must be set explicitly (no hardcoded fallback)
# Set DB_PASSWORD in .env.local for local development
# Use Docker Secrets in production/staging via DB_PASSWORD_FILE
POSTGRES_PASSWORD: ${DB_PASSWORD}
# Performance & Connection Settings
POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
PGDATA: /var/lib/postgresql/data/pgdata
volumes:
- db_data:/var/lib/postgresql/data
- "${DB_CONFIG_PATH:-./docker/postgres/postgresql.conf}:/etc/postgresql/postgresql.conf:ro"
- "${DB_INIT_PATH:-./docker/postgres/init}:/docker-entrypoint-initdb.d:ro"
command:
- "postgres"
- "-c"
- "config_file=/etc/postgresql/postgresql.conf"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USERNAME:-postgres} -d ${DB_DATABASE:-michaelschiemer}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
networks:
- backend
redis:
image: redis:7-alpine
volumes:
- "${REDIS_CONFIG_PATH:-./docker/redis/redis.conf}:/usr/local/etc/redis/redis.conf:ro"
- redis_data:/data
command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
networks:
- cache
queue-worker:
build:
context: .
dockerfile: docker/worker/Dockerfile
entrypoint: "" # Override any entrypoint
command: ["php", "/var/www/html/worker.php"] # Direct command execution
depends_on:
php:
condition: service_healthy
redis:
condition: service_healthy
db:
condition: service_healthy
volumes:
# Use same storage volumes as PHP container for consistency
- storage-cache:/var/www/html/storage/cache:rw
- storage-queue:/var/www/html/storage/queue:rw
- storage-discovery:/var/www/html/storage/discovery:rw
- var-data:/var/www/html/var:rw
networks:
- backend
- cache
# Graceful shutdown timeout
stop_grace_period: 30s
minio:
image: minio/minio:latest
environment:
- TZ=Europe/Berlin
# SECURITY: MINIO credentials must be set explicitly (no hardcoded fallback)
# Set MINIO_ROOT_USER and MINIO_ROOT_PASSWORD in .env.local for local development
# Use Docker Secrets in production/staging for production deployments
- MINIO_ROOT_USER=${MINIO_ROOT_USER}
- MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}
command: server /data --console-address ":9001"
volumes:
- minio_data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
networks:
- backend
networks:
frontend:
driver: bridge
backend:
driver: bridge
cache:
driver: bridge
volumes:
redis_data:
composer-cache:
storage-cache: # Cache directory (performance-critical)
storage-queue: # Queue directory (performance-critical)
storage-discovery: # Discovery cache (framework-internal)
var-data:
db_data:
project-data:
worker-logs:
worker-queue:
worker-storage: # Complete separate storage for worker with correct permissions
minio_data: # MinIO object storage data
# Docker Secrets Configuration
# Secrets are defined here but activated in environment-specific override files
secrets:
db_root_password:
file: ./secrets/db_root_password.txt
external: false
db_user_password:
file: ./secrets/db_user_password.txt
external: false
redis_password:
file: ./secrets/redis_password.txt
external: false
app_key:
file: ./secrets/app_key.txt
external: false
vault_encryption_key:
file: ./secrets/vault_encryption_key.txt
external: false
git_token:
file: ./secrets/git_token.txt
external: false

View File

@@ -1,334 +0,0 @@
# Docker Registry: registry.michaelschiemer.de (HTTPS via Traefik)
services:
# PHP-FPM Application Runtime
app:
image: git.michaelschiemer.de:5000/framework:latest
container_name: app
restart: unless-stopped
networks:
- app-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=${APP_ENV:-production}
- APP_DEBUG=${APP_DEBUG:-false}
- APP_URL=${APP_URL:-https://michaelschiemer.de}
- APP_KEY=${APP_KEY:-}
# Git Repository (optional - if set, container will clone/pull code on start)
- GIT_REPOSITORY_URL=${GIT_REPOSITORY_URL:-}
- GIT_BRANCH=${GIT_BRANCH:-main}
- GIT_TOKEN=${GIT_TOKEN:-}
- GIT_USERNAME=${GIT_USERNAME:-}
- GIT_PASSWORD=${GIT_PASSWORD:-}
# Database
- DB_HOST=${DB_HOST:-postgres}
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE}
- DB_USERNAME=${DB_USERNAME}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- DB_PASSWORD_FILE=/run/secrets/db_user_password
# Redis
- REDIS_HOST=redis
- REDIS_PORT=6379
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- REDIS_PASSWORD_FILE=/run/secrets/redis_password
# Cache
- CACHE_DRIVER=redis
- CACHE_PREFIX=${CACHE_PREFIX:-app}
# Session
- SESSION_DRIVER=redis
- SESSION_LIFETIME=${SESSION_LIFETIME:-120}
# Queue
- QUEUE_DRIVER=redis
- QUEUE_CONNECTION=default
secrets:
- db_user_password
- redis_password
volumes:
- app-code:/var/www/html
- app-storage:/var/www/html/storage
- app-logs:/var/www/html/storage/logs
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
healthcheck:
test: ["CMD-SHELL", "true"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
depends_on:
redis:
condition: service_started
# Nginx Web Server
# Uses same image as app - clones code from Git if GIT_REPOSITORY_URL is set, then runs nginx
nginx:
image: git.michaelschiemer.de:5000/framework:latest
container_name: nginx
restart: unless-stopped
networks:
- traefik-public
- app-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=${APP_ENV:-production}
- APP_DEBUG=${APP_DEBUG:-false}
# Git Repository (same as app - will clone code on start)
- GIT_REPOSITORY_URL=${GIT_REPOSITORY_URL:-}
- GIT_BRANCH=${GIT_BRANCH:-main}
- GIT_TOKEN=${GIT_TOKEN:-}
- GIT_USERNAME=${GIT_USERNAME:-}
- GIT_PASSWORD=${GIT_PASSWORD:-}
volumes:
- ./nginx/conf.d:/etc/nginx/conf.d:ro
- app-storage:/var/www/html/storage:ro
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
# Use custom entrypoint that ensures code is available then starts nginx only (no PHP-FPM)
entrypoint: ["/bin/sh", "-c"]
command:
- |
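# NOTE: "$$" throughout this script is Docker Compose escaping for a literal "$";
# the container shell sees a single "$" at runtime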
# Ensure code is available in /var/www/html (from image or Git)
GIT_TARGET_DIR="/var/www/html"
# If storage is mounted but code is missing, copy from image's original location
if [ ! -d "$$GIT_TARGET_DIR/public" ] && [ -d "/var/www/html.orig" ]; then
echo "?? [nginx] Copying code from image..."
# Copy everything except storage (which is a volume mount)
find /var/www/html.orig -mindepth 1 -maxdepth 1 ! -name "storage" -exec cp -r {} "$$GIT_TARGET_DIR/" \; 2>/dev/null || true
fi
if [ -n "$$GIT_REPOSITORY_URL" ]; then
# Configure Git to be non-interactive
export GIT_TERMINAL_PROMPT=0
export GIT_ASKPASS=echo
# Determine authentication method
if [ -n "$$GIT_TOKEN" ]; then
GIT_URL_WITH_AUTH=$$(echo "$$GIT_REPOSITORY_URL" | sed "s|https://|https://$${GIT_TOKEN}@|")
elif [ -n "$$GIT_USERNAME" ] && [ -n "$$GIT_PASSWORD" ]; then
GIT_URL_WITH_AUTH=$$(echo "$$GIT_REPOSITORY_URL" | sed "s|https://|https://$${GIT_USERNAME}:$${GIT_PASSWORD}@|")
else
echo "⚠️ [nginx] No Git credentials provided (GIT_TOKEN or GIT_USERNAME/GIT_PASSWORD). Using image contents."
GIT_URL_WITH_AUTH=""
fi
if [ -n "$$GIT_URL_WITH_AUTH" ] && [ ! -d "$$GIT_TARGET_DIR/.git" ]; then
echo "?? [nginx] Cloning repository from $$GIT_REPOSITORY_URL (branch: $${GIT_BRANCH:-main})..."
# Remove only files/dirs that are not storage (which is a volume mount)
# Clone into a temporary directory first, then move contents
TEMP_CLONE="$${GIT_TARGET_DIR}.tmp"
rm -rf "$$TEMP_CLONE" 2>/dev/null || true
if git clone --branch "$${GIT_BRANCH:-main}" --depth 1 "$$GIT_URL_WITH_AUTH" "$$TEMP_CLONE"; then
# Remove only files/dirs that are not storage (which is a volume mount)
find "$$GIT_TARGET_DIR" -mindepth 1 -maxdepth 1 ! -name "storage" -exec rm -rf {} \\; 2>/dev/null || true
# Move contents from temp directory to target (preserving storage)
find "$$TEMP_CLONE" -mindepth 1 -maxdepth 1 ! -name "." ! -name ".." -exec mv {} "$$GIT_TARGET_DIR/" \\; 2>/dev/null || true
rm -rf "$$TEMP_CLONE" 2>/dev/null || true
echo "✅ [nginx] Repository cloned successfully"
else
echo "? Git clone failed. Using image contents."
rm -rf "$$TEMP_CLONE" 2>/dev/null || true
fi
else
echo "?? [nginx] Pulling latest changes..."
cd "$$GIT_TARGET_DIR"
git fetch origin "$${GIT_BRANCH:-main}" || true
git reset --hard "origin/$${GIT_BRANCH:-main}" || true
git clean -fd || true
fi
if [ -f "$$GIT_TARGET_DIR/composer.json" ]; then
echo "?? [nginx] Installing dependencies..."
cd "$$GIT_TARGET_DIR"
composer install --no-dev --optimize-autoloader --no-interaction --no-scripts || true
composer dump-autoload --optimize --classmap-authoritative || true
fi
echo "? [nginx] Git sync completed"
else
echo "?? [nginx] GIT_REPOSITORY_URL not set, using code from image"
fi
# Start nginx only (no PHP-FPM)
echo "?? [nginx] Starting nginx..."
exec nginx -g "daemon off;"
labels:
- "traefik.enable=true"
# HTTP Router
- "traefik.http.routers.app.rule=Host(`${APP_DOMAIN:-michaelschiemer.de}`)"
- "traefik.http.routers.app.entrypoints=websecure"
- "traefik.http.routers.app.tls=true"
- "traefik.http.routers.app.tls.certresolver=letsencrypt"
# Service
- "traefik.http.services.app.loadbalancer.server.port=80"
# Middleware
- "traefik.http.routers.app.middlewares=default-chain@file"
# Network
- "traefik.docker.network=traefik-public"
healthcheck:
test: ["CMD-SHELL", "curl -f http://127.0.0.1/health || exit 1"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
depends_on:
app:
condition: service_started
# Redis Cache/Session/Queue Backend
redis:
image: redis:7-alpine
container_name: redis
restart: unless-stopped
networks:
- app-internal
environment:
- TZ=Europe/Berlin
secrets:
- redis_password
command: >
sh -c "redis-server
--requirepass $$(cat /run/secrets/redis_password)
--maxmemory 512mb
--maxmemory-policy allkeys-lru
--save 900 1
--save 300 10
--save 60 10000
--appendonly yes
--appendfsync everysec"
volumes:
- redis-data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
healthcheck:
test: ["CMD", "sh", "-c", "redis-cli --no-auth-warning -a $$(cat /run/secrets/redis_password) ping"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
# Queue Worker (Background Jobs)
queue-worker:
image: git.michaelschiemer.de:5000/framework:latest
container_name: queue-worker
restart: unless-stopped
networks:
- app-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=${APP_ENV:-production}
- APP_DEBUG=${APP_DEBUG:-false}
# Database
- DB_HOST=${DB_HOST:-postgres}
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE}
- DB_USERNAME=${DB_USERNAME}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- DB_PASSWORD_FILE=/run/secrets/db_user_password
# Redis
- REDIS_HOST=redis
- REDIS_PORT=6379
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- REDIS_PASSWORD_FILE=/run/secrets/redis_password
# Queue
- QUEUE_DRIVER=redis
- QUEUE_CONNECTION=default
- QUEUE_WORKER_SLEEP=${QUEUE_WORKER_SLEEP:-3}
- QUEUE_WORKER_TRIES=${QUEUE_WORKER_TRIES:-3}
- QUEUE_WORKER_TIMEOUT=${QUEUE_WORKER_TIMEOUT:-60}
volumes:
- app-storage:/var/www/html/storage
- app-logs:/var/www/html/storage/logs
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
secrets:
- db_user_password
- redis_password
command: php console.php queue:work --queue=default --timeout=${QUEUE_WORKER_TIMEOUT:-60}
healthcheck:
test: ["CMD-SHELL", "php -r 'exit(0);' && test -f /var/www/html/console.php || exit 1"]
interval: 60s
timeout: 10s
retries: 3
start_period: 30s
depends_on:
app:
condition: service_started
redis:
condition: service_started
# Scheduler (Cron Jobs)
scheduler:
image: git.michaelschiemer.de:5000/framework:latest
container_name: scheduler
restart: unless-stopped
networks:
- app-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=${APP_ENV:-production}
- APP_DEBUG=${APP_DEBUG:-false}
# Database
- DB_HOST=${DB_HOST:-postgres}
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE}
- DB_USERNAME=${DB_USERNAME}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- DB_PASSWORD_FILE=/run/secrets/db_user_password
# Redis
- REDIS_HOST=redis
- REDIS_PORT=6379
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- REDIS_PASSWORD_FILE=/run/secrets/redis_password
secrets:
- db_user_password
- redis_password
volumes:
- app-storage:/var/www/html/storage
- app-logs:/var/www/html/storage/logs
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
command: php console.php scheduler:run
healthcheck:
test: ["CMD-SHELL", "php -r 'exit(0);' && test -f /var/www/html/console.php || exit 1"]
interval: 60s
timeout: 10s
retries: 3
start_period: 30s
depends_on:
app:
condition: service_started
redis:
condition: service_started
volumes:
app-code:
name: app-code
external: true
app-storage:
name: app-storage
external: true
app-logs:
name: app-logs
external: true
redis-data:
name: redis-data
external: true
secrets:
db_user_password:
file: ./secrets/db_user_password.txt
redis_password:
file: ./secrets/redis_password.txt
networks:
traefik-public:
external: true
app-internal:
external: true
name: app-internal

View File

@@ -1,286 +0,0 @@
version: '3.8'
# Docker Registry: registry.michaelschiemer.de (HTTPS via Traefik)
services:
# PHP-FPM Application Runtime
app:
image: git.michaelschiemer.de:5000/framework:latest
container_name: app
restart: unless-stopped
networks:
- app-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=${APP_ENV:-production}
- APP_DEBUG=${APP_DEBUG:-false}
- APP_URL=${APP_URL:-https://michaelschiemer.de}
# Git Repository (optional - if set, container will clone/pull code on start)
- GIT_REPOSITORY_URL=${GIT_REPOSITORY_URL:-}
- GIT_BRANCH=${GIT_BRANCH:-main}
- GIT_TOKEN=${GIT_TOKEN:-}
- GIT_USERNAME=${GIT_USERNAME:-}
- GIT_PASSWORD=${GIT_PASSWORD:-}
# Database
- DB_HOST=${DB_HOST:-postgres}
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE}
- DB_USERNAME=${DB_USERNAME}
- DB_PASSWORD=${DB_PASSWORD}
# Redis
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=${REDIS_PASSWORD}
# Cache
- CACHE_DRIVER=redis
- CACHE_PREFIX=${CACHE_PREFIX:-app}
# Session
- SESSION_DRIVER=redis
- SESSION_LIFETIME=${SESSION_LIFETIME:-120}
# Queue
- QUEUE_DRIVER=redis
- QUEUE_CONNECTION=default
volumes:
- app-storage:/var/www/html/storage
- app-logs:/var/www/html/storage/logs
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
healthcheck:
test: ["CMD-SHELL", "true"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
depends_on:
redis:
condition: service_started
# Nginx Web Server
# Uses same image as app - clones code from Git if GIT_REPOSITORY_URL is set, then runs nginx
nginx:
image: git.michaelschiemer.de:5000/framework:latest
container_name: nginx
restart: unless-stopped
networks:
- traefik-public
- app-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=${APP_ENV:-production}
- APP_DEBUG=${APP_DEBUG:-false}
# Git Repository (same as app - will clone code on start)
- GIT_REPOSITORY_URL=${GIT_REPOSITORY_URL:-}
- GIT_BRANCH=${GIT_BRANCH:-main}
- GIT_TOKEN=${GIT_TOKEN:-}
- GIT_USERNAME=${GIT_USERNAME:-}
- GIT_PASSWORD=${GIT_PASSWORD:-}
volumes:
- ./nginx/conf.d:/etc/nginx/conf.d:ro
- app-storage:/var/www/html/storage:ro
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
# Use custom entrypoint that ensures code is available then starts nginx only (no PHP-FPM)
entrypoint: ["/bin/sh", "-c"]
command:
- |
# Ensure code is available in /var/www/html (from image or Git)
GIT_TARGET_DIR="/var/www/html"
# If storage is mounted but code is missing, copy from image's original location
if [ ! -d "$$GIT_TARGET_DIR/public" ] && [ -d "/var/www/html.orig" ]; then
echo "?? [nginx] Copying code from image..."
# Copy everything except storage (which is a volume mount)
find /var/www/html.orig -mindepth 1 -maxdepth 1 ! -name "storage" -exec cp -r {} "$$GIT_TARGET_DIR/" \; 2>/dev/null || true
fi
if [ -n "$$GIT_REPOSITORY_URL" ]; then
# Determine authentication method
if [ -n "$$GIT_TOKEN" ]; then
GIT_URL_WITH_AUTH=$$(echo "$$GIT_REPOSITORY_URL" | sed "s|https://|https://$${GIT_TOKEN}@|")
elif [ -n "$$GIT_USERNAME" ] && [ -n "$$GIT_PASSWORD" ]; then
GIT_URL_WITH_AUTH=$$(echo "$$GIT_REPOSITORY_URL" | sed "s|https://|https://$${GIT_USERNAME}:$${GIT_PASSWORD}@|")
else
GIT_URL_WITH_AUTH="$$GIT_REPOSITORY_URL"
fi
if [ ! -d "$$GIT_TARGET_DIR/.git" ]; then
echo "?? [nginx] Cloning repository from $$GIT_REPOSITORY_URL (branch: $${GIT_BRANCH:-main})..."
# Remove only files/dirs that are not storage (which is a volume mount)
find "$$GIT_TARGET_DIR" -mindepth 1 -maxdepth 1 ! -name "storage" -exec rm -rf {} \; 2>/dev/null || true
git clone --branch "$${GIT_BRANCH:-main}" --depth 1 "$$GIT_URL_WITH_AUTH" "$$GIT_TARGET_DIR" || {
echo "? Git clone failed. Using image contents."
}
else
echo "?? [nginx] Pulling latest changes..."
cd "$$GIT_TARGET_DIR"
git fetch origin "$${GIT_BRANCH:-main}" || true
git reset --hard "origin/$${GIT_BRANCH:-main}" || true
git clean -fd || true
fi
if [ -f "$$GIT_TARGET_DIR/composer.json" ]; then
echo "?? [nginx] Installing dependencies..."
cd "$$GIT_TARGET_DIR"
composer install --no-dev --optimize-autoloader --no-interaction --no-scripts || true
composer dump-autoload --optimize --classmap-authoritative || true
fi
echo "? [nginx] Git sync completed"
else
echo "?? [nginx] GIT_REPOSITORY_URL not set, using code from image"
fi
# Start nginx only (no PHP-FPM)
echo "?? [nginx] Starting nginx..."
exec nginx -g "daemon off;"
labels:
- "traefik.enable=true"
# HTTP Router
- "traefik.http.routers.app.rule=Host(`${APP_DOMAIN:-michaelschiemer.de}`)"
- "traefik.http.routers.app.entrypoints=websecure"
- "traefik.http.routers.app.tls=true"
- "traefik.http.routers.app.tls.certresolver=letsencrypt"
# Service
- "traefik.http.services.app.loadbalancer.server.port=80"
# Middleware
- "traefik.http.routers.app.middlewares=default-chain@file"
# Network
- "traefik.docker.network=traefik-public"
healthcheck:
test: ["CMD-SHELL", "curl -f http://127.0.0.1/health || exit 1"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
depends_on:
app:
condition: service_started
# Redis Cache/Session/Queue Backend
redis:
image: redis:7-alpine
container_name: redis
restart: unless-stopped
networks:
- app-internal
environment:
- TZ=Europe/Berlin
command: >
redis-server
--requirepass ${REDIS_PASSWORD}
--maxmemory 512mb
--maxmemory-policy allkeys-lru
--save 900 1
--save 300 10
--save 60 10000
--appendonly yes
--appendfsync everysec
volumes:
- redis-data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
healthcheck:
test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
# Queue Worker (Background Jobs)
queue-worker:
image: git.michaelschiemer.de:5000/framework:latest
container_name: queue-worker
restart: unless-stopped
networks:
- app-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=${APP_ENV:-production}
- APP_DEBUG=${APP_DEBUG:-false}
# Database
- DB_HOST=${DB_HOST:-postgres}
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE}
- DB_USERNAME=${DB_USERNAME}
- DB_PASSWORD=${DB_PASSWORD}
# Redis
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=${REDIS_PASSWORD}
# Queue
- QUEUE_DRIVER=redis
- QUEUE_CONNECTION=default
- QUEUE_WORKER_SLEEP=${QUEUE_WORKER_SLEEP:-3}
- QUEUE_WORKER_TRIES=${QUEUE_WORKER_TRIES:-3}
- QUEUE_WORKER_TIMEOUT=${QUEUE_WORKER_TIMEOUT:-60}
volumes:
- app-storage:/var/www/html/storage
- app-logs:/var/www/html/storage/logs
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
command: php console.php queue:work --queue=default --timeout=${QUEUE_WORKER_TIMEOUT:-60}
healthcheck:
test: ["CMD-SHELL", "php -r 'exit(0);' && test -f /var/www/html/console.php || exit 1"]
interval: 60s
timeout: 10s
retries: 3
start_period: 30s
depends_on:
app:
condition: service_started
redis:
condition: service_started
# Scheduler (Cron Jobs)
scheduler:
image: git.michaelschiemer.de:5000/framework:latest
container_name: scheduler
restart: unless-stopped
networks:
- app-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=${APP_ENV:-production}
- APP_DEBUG=${APP_DEBUG:-false}
# Database
- DB_HOST=${DB_HOST:-postgres}
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE}
- DB_USERNAME=${DB_USERNAME}
- DB_PASSWORD=${DB_PASSWORD}
# Redis
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=${REDIS_PASSWORD}
volumes:
- app-storage:/var/www/html/storage
- app-logs:/var/www/html/storage/logs
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
command: php console.php scheduler:run
healthcheck:
test: ["CMD-SHELL", "php -r 'exit(0);' && test -f /var/www/html/console.php || exit 1"]
interval: 60s
timeout: 10s
retries: 3
start_period: 30s
depends_on:
app:
condition: service_started
redis:
condition: service_started
volumes:
app-code:
name: app-code
app-storage:
name: app-storage
app-logs:
name: app-logs
redis-data:
name: redis-data
networks:
traefik-public:
external: true
app-internal:
external: true
name: app-internal

View File

@@ -17,4 +17,5 @@ MYSQL_PASSWORD=<generate-strong-password>
REDIS_PASSWORD=<generate-strong-password>
# Gitea Settings
DISABLE_REGISTRATION=true # Set to false to allow user registration
# Set to false to allow user registration
DISABLE_REGISTRATION=true

View File

@@ -177,9 +177,10 @@ The playbook will:
### Configuration File
Gitea configuration is managed via `app.ini` file:
- **Local file**: `deployment/stacks/gitea/app.ini` (for local development)
- **Production**: Generated from Ansible template `deployment/ansible/templates/gitea-app.ini.j2`
- The `app.ini` is mounted read-only into the container at `/data/gitea/conf/app.ini`
- **Template**: `deployment/ansible/templates/gitea-app.ini.j2` (Ansible template)
- **Production**: Generated from template and deployed via Ansible playbook `setup-gitea-initial-config.yml`
- The `app.ini` is copied to the container at `/data/gitea/conf/app.ini`
- **Important**: `app.ini` is a minimal configuration. Cache, Session, Queue, and other settings are controlled via `GITEA__...` environment variables in `docker-compose.yml` which override `app.ini` settings on every container start.
- Configuration is based on the official Gitea example: https://github.com/go-gitea/gitea/blob/main/custom/conf/app.example.ini
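For example, a variable of the form `GITEA__SECTION__KEY` overrides the matching `app.ini` entry (illustrative snippet, not the full service definition):

```yaml
services:
  gitea:
    environment:
      # Overrides PROVIDER in the [session] section of app.ini
      - GITEA__session__PROVIDER=redis
```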
**Key Configuration Sections:**

View File

@@ -1,80 +0,0 @@
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Gitea Configuration File
;; This file is based on the official Gitea example configuration
;; https://github.com/go-gitea/gitea/blob/main/custom/conf/app.example.ini
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; General Settings
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
APP_NAME = Gitea: Git with a cup of tea
RUN_MODE = prod
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Server Configuration
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
[server]
PROTOCOL = http
DOMAIN = git.michaelschiemer.de
HTTP_ADDR = 0.0.0.0
HTTP_PORT = 3000
ROOT_URL = https://git.michaelschiemer.de/
PUBLIC_URL_DETECTION = auto
;; SSH Configuration
DISABLE_SSH = false
START_SSH_SERVER = true
SSH_DOMAIN = git.michaelschiemer.de
SSH_PORT = 22
SSH_LISTEN_PORT = 22
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Database Configuration
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
[database]
DB_TYPE = postgres
HOST = postgres:5432
NAME = gitea
USER = gitea
PASSWD = gitea_password
SSL_MODE = disable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Cache Configuration
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
[cache]
ENABLED = false
ADAPTER = memory
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Session Configuration
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
[session]
PROVIDER = file
PROVIDER_CONFIG = data/sessions
COOKIE_SECURE = true
COOKIE_NAME = i_like_gitea
GC_INTERVAL_TIME = 86400
SESSION_LIFE_TIME = 86400
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Queue Configuration
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
[queue]
TYPE = channel
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Service Configuration
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
[service]
DISABLE_REGISTRATION = true
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Actions Configuration
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
[actions]
ENABLED = true
;; Use "self" to use the current Gitea instance for actions (not GitHub)
;; Do NOT set DEFAULT_ACTIONS_URL to a custom URL - it's not supported
;; Leaving it unset or setting to "self" will use the current instance
;DEFAULT_ACTIONS_URL = self

View File

@@ -19,10 +19,22 @@ services:
- GITEA__database__NAME=${POSTGRES_DB:-gitea}
- GITEA__database__USER=${POSTGRES_USER:-gitea}
- GITEA__database__PASSWD=${POSTGRES_PASSWORD:-gitea_password}
- GITEA__cache__ENABLED=true
- GITEA__cache__ADAPTER=redis
- GITEA__cache__HOST=redis:6379
- GITEA__cache__PASSWORD=${REDIS_PASSWORD:-gitea_redis_password}
# Database connection pool settings to prevent "Timeout before authentication" errors
# These limit the number of concurrent connections and prevent connection pool exhaustion
# - MAX_OPEN_CONNS: Maximum number of open connections to the database
# - MAX_IDLE_CONNS: Keep more warm idle connections so new sessions are not constantly created
# - CONN_MAX_LIFETIME: 10 minutes, so connections are not recycled too quickly
# - CONN_MAX_IDLE_TIME: Clean up connections that are idle for too long
- GITEA__database__MAX_OPEN_CONNS=50
- GITEA__database__MAX_IDLE_CONNS=30
- GITEA__database__CONN_MAX_LIFETIME=600
- GITEA__database__CONN_MAX_IDLE_TIME=300
# Cache, Session, and Queue configuration via environment variables
# These override app.ini settings and are applied on every container start
# NOTE: Cache disabled - Gitea 1.25 does not interpret GITEA__cache__HOST correctly
# (it connects to 127.0.0.1:6379 instead of redis:6379). Session and queue still use Redis.
- GITEA__cache__ENABLED=false
- GITEA__cache__ADAPTER=memory
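# The session PROVIDER_CONFIG below is a comma-separated Redis connection string
# (network, addr, password, db) plus pool_size and idle_timeout tuning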
- GITEA__session__PROVIDER=redis
- GITEA__session__PROVIDER_CONFIG=network=tcp,addr=redis:6379,password=${REDIS_PASSWORD:-gitea_redis_password},db=0,pool_size=100,idle_timeout=180
- GITEA__queue__TYPE=redis
@@ -40,21 +52,18 @@ services:
labels:
- "traefik.enable=true"
# HTTP Router
# HTTP Router configuration
- "traefik.http.routers.gitea.rule=Host(`git.michaelschiemer.de`)"
- "traefik.http.routers.gitea.entrypoints=websecure"
- "traefik.http.routers.gitea.tls=true"
- "traefik.http.routers.gitea.tls.certresolver=letsencrypt"
# Priority ensures this router matches before the catchall (the catchall sets no explicit priority, so it falls back to Traefik's default rule-length priority)
- "traefik.http.routers.gitea.priority=100"
# Service
# Service configuration (Docker provider uses port, not url)
- "traefik.http.services.gitea.loadbalancer.server.port=3000"
# Use container name explicitly for host network mode
- "traefik.http.services.gitea.loadbalancer.server.scheme=http"
# Middleware
- "traefik.http.routers.gitea.middlewares=default-chain@file"
# Middleware chain (removed temporarily to test if it causes issues)
# - "traefik.http.routers.gitea.middlewares=security-headers-global@file,gzip-compression@file"
# Explicitly reference the service (like MinIO does)
- "traefik.http.routers.gitea.service=gitea"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/api/healthz"]
interval: 30s
@@ -73,7 +82,12 @@ services:
- POSTGRES_DB=gitea
- POSTGRES_USER=gitea
- POSTGRES_PASSWORD=gitea_password
command: postgres -c max_connections=300
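# authentication_timeout is raised to 180s; statement_timeout and
# idle_in_transaction_session_timeout (milliseconds) cap runaway queries at 30s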
command: >
postgres
-c max_connections=300
-c authentication_timeout=180
-c statement_timeout=30000
-c idle_in_transaction_session_timeout=30000
volumes:
- postgres-data:/var/lib/postgresql/data
healthcheck:

View File

@@ -1,33 +0,0 @@
[mysqld]
# Gitea-optimized MySQL configuration
# Character set
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci
# InnoDB settings
innodb_buffer_pool_size = 256M
innodb_log_file_size = 64M
innodb_flush_log_at_trx_commit = 2
innodb_flush_method = O_DIRECT
# Connection settings
max_connections = 200
max_allowed_packet = 64M
# Query cache (disabled in MySQL 8.0+)
# Performance schema
performance_schema = ON
# Logging
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow-queries.log
long_query_time = 2
# Binary logging for backups
log_bin = /var/log/mysql/mysql-bin.log
binlog_expire_logs_seconds = 604800 # 7 days
max_binlog_size = 100M
[client]
default-character-set = utf8mb4

View File

@@ -0,0 +1,47 @@
# Local Development Override for Monitoring Stack
# Usage: docker compose -f docker-compose.yml -f docker-compose.local.yml up -d
#
# IMPORTANT: Must use LIST format labels like base config to prevent merging
# Dictionary format + List format = Both remain (merged)
# List format + List format = Properly merged/replaced
#
# This override file:
# - Changes entrypoints from 'websecure' to 'web' (HTTP only for local dev)
# - Removes TLS/Let's Encrypt configuration by NOT including those labels
# - Keeps all other configuration from docker-compose.yml
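# Illustrative contrast (hypothetical service "example"):
#   dictionary format:  labels: { "traefik.enable": "true" }
#   list format:        labels: ["traefik.enable=true"]
# Dictionary-over-list merging keeps entries from both files; list-over-list
# lets the labels in this file replace the base labels as intended.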
services:
portainer:
labels:
- "traefik.enable=true"
- "traefik.http.routers.portainer.rule=Host(`portainer.${DOMAIN}`)"
- "traefik.http.routers.portainer.entrypoints=web"
- "traefik.http.routers.portainer.tls=false"
- "traefik.http.services.portainer.loadbalancer.server.port=9000"
prometheus:
labels:
- "traefik.enable=true"
- "traefik.http.routers.prometheus.rule=Host(`prometheus.${DOMAIN}`)"
- "traefik.http.routers.prometheus.entrypoints=web"
- "traefik.http.routers.prometheus.tls=false"
- "traefik.http.routers.prometheus.middlewares=prometheus-auth@docker"
- "traefik.http.middlewares.prometheus-auth.basicauth.users=${PROMETHEUS_AUTH}"
- "traefik.http.services.prometheus.loadbalancer.server.port=9090"
grafana:
environment:
# Update root URL for HTTP
- GF_SERVER_ROOT_URL=http://grafana.${DOMAIN}:8081
- GF_SECURITY_ADMIN_USER=${GRAFANA_ADMIN_USER}
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
- GF_USERS_ALLOW_SIGN_UP=false
- GF_INSTALL_PLUGINS=${GRAFANA_PLUGINS}
- GF_LOG_LEVEL=info
- GF_ANALYTICS_REPORTING_ENABLED=false
labels:
- "traefik.enable=true"
- "traefik.http.routers.grafana.rule=Host(`grafana.${DOMAIN}`)"
- "traefik.http.routers.grafana.entrypoints=web"
- "traefik.http.routers.grafana.tls=false"
- "traefik.http.services.grafana.loadbalancer.server.port=3000"

View File

@@ -0,0 +1,257 @@
# PostgreSQL Production Stack - Production Database
## Overview
Production-ready PostgreSQL 16 database stack with automated backup system and performance optimization.
**Features**:
- PostgreSQL 16 Alpine (lightweight, secure)
- Automated daily backups with configurable retention
- Performance-optimized configuration (2GB memory allocation)
- Health checks and automatic recovery
- Persistent storage with named volumes
- Isolated `postgres-production-internal` network
- Resource limits for stability
## Services
- **postgres-production** - PostgreSQL 16 database server
- **postgres-production-backup** - Automated backup service with cron scheduling
## Prerequisites
1. **Docker and Docker Compose** installed
2. **Environment file** created (`.env`)
## Configuration
### 1. Create Environment File
```bash
cp .env.example .env
```
### 2. Generate Secure Password
```bash
openssl rand -base64 32
```
Update `.env`:
```env
POSTGRES_DB=michaelschiemer
POSTGRES_USER=postgres
POSTGRES_PASSWORD=<generated-password>
BACKUP_RETENTION_DAYS=7
BACKUP_SCHEDULE=0 2 * * *
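# Cron format: minute hour day-of-month month day-of-week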
```
## Deployment
### Initial Setup
```bash
# Create environment file
cp .env.example .env
# Generate and set password
openssl rand -base64 32
# Update POSTGRES_PASSWORD in .env
# Start services
docker compose up -d
# Check logs
docker compose logs -f
# Verify health
docker compose ps
```
### Verify Deployment
```bash
# Check PostgreSQL is running
docker exec postgres-production pg_isready -U postgres -d michaelschiemer
# Expected: postgres-production:5432 - accepting connections
# Check backup service
docker compose logs postgres-production-backup
# Expected: Initial backup completed successfully
```
## Integration with Production Application Stack
The Production Application Stack connects to this database via the `postgres-production-internal` network.
**Connection Configuration** in `deployment/stacks/production/.env`:
```env
DB_HOST=postgres-production
DB_PORT=5432
DB_DATABASE=michaelschiemer
DB_USERNAME=postgres
DB_PASSWORD=<same-as-postgres-production-password>
```
**Network Connection**: The Production Application Stack must be connected to the `postgres-production-internal` network.
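A minimal sketch of that attachment (the service name `app` is illustrative):

```yaml
services:
  app:
    networks:
      - postgres-production-internal

networks:
  postgres-production-internal:
    external: true
    name: postgres-production-internal
```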
## Usage
### Database Access
#### From Host Machine
```bash
# Connect to database
docker exec -it postgres-production psql -U postgres -d michaelschiemer
# Run SQL query
docker exec postgres-production psql -U postgres -d michaelschiemer -c "SELECT version();"
```
#### From Application Container
```bash
# Connection string format
postgresql://postgres:password@postgres-production:5432/michaelschiemer
```
### Backup Management
#### Manual Backup
```bash
# Trigger manual backup
docker exec postgres-production-backup /scripts/backup.sh
# List backups
ls -lh backups/
# Example output:
# postgres_michaelschiemer_20250130_020000.sql.gz
```
#### Restore from Backup
```bash
# List available backups
docker exec postgres-production-backup ls -lh /backups
# Restore specific backup
docker exec -it postgres-production-backup /scripts/restore.sh /backups/postgres_michaelschiemer_20250130_020000.sql.gz
# ⚠️ WARNING: This will DROP and RECREATE the database!
```
## Network Isolation
This stack uses its own isolated network:
- **Network**: `postgres-production-internal`
- **Purpose**: Isolate Production database from other services
- **Access**: Only services explicitly connected to this network can access the database
**Connecting Application Stack**:
```yaml
# In deployment/stacks/production/docker-compose.production.yml
networks:
postgres-production-internal:
external: true
name: postgres-production-internal
```
## Security
### Network Isolation
- PostgreSQL only accessible via `postgres-production-internal` network
- No external ports exposed
- Service-to-service communication only
### Authentication
- Strong password required (generated with `openssl rand -base64 32`)
- No default passwords
- Password stored in environment variables only
## Monitoring
### Health Checks
```bash
# Check service health
docker compose ps
# Expected: Both services "healthy"
# Manual health check
docker exec postgres-production pg_isready -U postgres -d michaelschiemer
```
### Resource Usage
```bash
# Database container stats
docker stats postgres-production --no-stream
# Disk usage
docker exec postgres-production du -sh /var/lib/postgresql/data
```
### Logs
```bash
# PostgreSQL logs
docker compose logs postgres-production
# Backup logs
docker compose logs postgres-production-backup
# Real-time monitoring
docker compose logs -f
```
## Troubleshooting
### Database Won't Start
```bash
# Check logs
docker compose logs postgres-production
# Common issues:
# 1. Invalid configuration
docker exec postgres-production postgres --check
# 2. Permission issues
docker exec postgres-production ls -la /var/lib/postgresql/data
```
### Connection Refused from Application
```bash
# 1. Check PostgreSQL is running
docker compose ps postgres-production
# 2. Verify network
docker network inspect postgres-production-internal | grep postgres-production
# 3. Check if application is connected to network
docker network inspect postgres-production-internal | grep app
```
## Differences from Staging Stack
| Aspect | Production | Staging |
|--------|-----------|---------|
| **Container Name** | postgres-production | postgres-staging |
| **Network** | postgres-production-internal | postgres-staging-internal |
| **Volume** | postgres-production-data | postgres-staging-data |
| **Database** | michaelschiemer | michaelschiemer_staging |
| **Backup Retention** | 7 days (configurable) | 7 days (configurable) |
## Additional Resources
- **PostgreSQL Documentation**: https://www.postgresql.org/docs/16/
- **Performance Tuning**: https://wiki.postgresql.org/wiki/Performance_Optimization
- **Backup Best Practices**: https://www.postgresql.org/docs/16/backup.html

View File

@@ -0,0 +1,70 @@
# PostgreSQL Configuration for Production
# Optimized for 2GB memory allocation
# Connection Settings
listen_addresses = '*'
max_connections = 100
superuser_reserved_connections = 3
# Memory Settings (for 2GB allocation)
shared_buffers = 512MB
effective_cache_size = 1536MB
maintenance_work_mem = 128MB
work_mem = 5MB
# Checkpoint Settings
checkpoint_completion_target = 0.9
wal_buffers = 16MB
default_statistics_target = 100
# Query Planner
random_page_cost = 1.1
effective_io_concurrency = 200
# Write-Ahead Logging
wal_level = replica
max_wal_size = 2GB
min_wal_size = 1GB
# Logging
log_destination = 'stderr'
logging_collector = on
log_directory = 'log'
log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
log_rotation_age = 1d
log_rotation_size = 100MB
log_line_prefix = '%m [%p] %u@%d '
log_timezone = 'Europe/Berlin'
# What to Log
log_min_duration_statement = 1000
log_checkpoints = on
log_connections = on
log_disconnections = on
log_lock_waits = on
log_statement = 'ddl'
# Autovacuum
autovacuum = on
autovacuum_max_workers = 3
autovacuum_naptime = 1min
# Client Connection Defaults
datestyle = 'iso, mdy'
timezone = 'Europe/Berlin'
lc_messages = 'en_US.utf8'
lc_monetary = 'en_US.utf8'
lc_numeric = 'en_US.utf8'
lc_time = 'en_US.utf8'
default_text_search_config = 'pg_catalog.english'
# Lock Management
deadlock_timeout = 1s
# Statement Timeout (prevent long-running queries)
statement_timeout = 30000 # 30 seconds
# Parallel Query Execution
max_parallel_workers_per_gather = 2
max_parallel_workers = 4
max_worker_processes = 4

View File

@@ -0,0 +1,72 @@
services:
# PostgreSQL Production Database
postgres-production:
image: postgres:16-alpine
container_name: postgres-production
restart: unless-stopped
networks:
- postgres-production-internal
environment:
- TZ=Europe/Berlin
- POSTGRES_DB=${POSTGRES_DB:-michaelschiemer}
- POSTGRES_USER=${POSTGRES_USER:-postgres}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- PGDATA=/var/lib/postgresql/data/pgdata
volumes:
- postgres-production-data:/var/lib/postgresql/data
- ./conf.d:/etc/postgresql/conf.d:ro
- ./backups:/backups
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
command:
- postgres
- -c
- config_file=/etc/postgresql/conf.d/postgresql.conf
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-michaelschiemer}"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
shm_size: 256mb
deploy:
resources:
limits:
memory: 2G
reservations:
memory: 512M
# Automated Backup Service for Production
postgres-production-backup:
image: postgres:16-alpine
container_name: postgres-production-backup
restart: unless-stopped
networks:
- postgres-production-internal
environment:
- TZ=Europe/Berlin
- POSTGRES_HOST=postgres-production
- POSTGRES_DB=${POSTGRES_DB:-michaelschiemer}
- POSTGRES_USER=${POSTGRES_USER:-postgres}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- BACKUP_RETENTION_DAYS=${BACKUP_RETENTION_DAYS:-7}
- BACKUP_SCHEDULE=${BACKUP_SCHEDULE:-0 2 * * *}
volumes:
- ./backups:/backups
- ./scripts:/scripts:ro
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
entrypoint: ["/scripts/backup-entrypoint.sh"]
depends_on:
postgres-production:
condition: service_healthy
volumes:
postgres-production-data:
name: postgres-production-data
networks:
postgres-production-internal:
name: postgres-production-internal
driver: bridge

View File

@@ -0,0 +1,23 @@
#!/bin/sh
set -e
echo "🔄 PostgreSQL Backup Service Starting..."
echo "📅 Backup Schedule: ${BACKUP_SCHEDULE}"
echo "🗑️ Retention: ${BACKUP_RETENTION_DAYS} days"
# Install dcron for scheduled backups
apk add --no-cache dcron
# Create cron job
echo "${BACKUP_SCHEDULE} /scripts/backup.sh >> /var/log/backup.log 2>&1" > /etc/crontabs/root
# Ensure backup directory exists
mkdir -p /backups
# Run initial backup
echo "🚀 Running initial backup..."
/scripts/backup.sh
# Start cron in foreground
echo "✅ Backup service ready - cron daemon starting"
crond -f -l 2

View File

@@ -0,0 +1,55 @@
#!/bin/sh
set -e
# Configuration
BACKUP_DIR="/backups"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="${BACKUP_DIR}/postgres_${POSTGRES_DB}_${TIMESTAMP}.sql.gz"
RETENTION_DAYS=${BACKUP_RETENTION_DAYS:-7}
echo "======================================"
echo "PostgreSQL Backup - $(date)"
echo "======================================"
# Wait for PostgreSQL to be ready
until PGPASSWORD="${POSTGRES_PASSWORD}" pg_isready -h "${POSTGRES_HOST}" -U "${POSTGRES_USER}" -d "${POSTGRES_DB}"; do
echo "⏳ Waiting for PostgreSQL..."
sleep 5
done
echo "✅ PostgreSQL is ready"
# Create backup
echo "📦 Creating backup: ${BACKUP_FILE}"
PGPASSWORD="${POSTGRES_PASSWORD}" pg_dump \
-h "${POSTGRES_HOST}" \
-U "${POSTGRES_USER}" \
-d "${POSTGRES_DB}" \
--clean \
--if-exists \
--create \
--no-owner \
--no-privileges \
| gzip > "${BACKUP_FILE}"
# Verify backup was created
if [ -f "${BACKUP_FILE}" ]; then
BACKUP_SIZE=$(du -h "${BACKUP_FILE}" | cut -f1)
echo "✅ Backup created successfully: ${BACKUP_SIZE}"
else
echo "❌ Backup failed!"
exit 1
fi
# Cleanup old backups
echo "🗑️ Cleaning up backups older than ${RETENTION_DAYS} days..."
find "${BACKUP_DIR}" -name "postgres_*.sql.gz" -type f -mtime +${RETENTION_DAYS} -delete
# List current backups
echo ""
echo "📊 Current backups:"
ls -lh "${BACKUP_DIR}"/postgres_*.sql.gz 2>/dev/null || echo "No backups found"
echo ""
echo "✅ Backup completed successfully"
echo "======================================"

View File

@@ -0,0 +1,55 @@
#!/bin/sh
set -e
# Configuration
BACKUP_DIR="/backups"
if [ -z "$1" ]; then
echo "Usage: ./restore.sh <backup-file>"
echo ""
echo "Available backups:"
ls -lh "${BACKUP_DIR}"/postgres_*.sql.gz 2>/dev/null || echo "No backups found"
exit 1
fi
BACKUP_FILE="$1"
if [ ! -f "${BACKUP_FILE}" ]; then
echo "❌ Backup file not found: ${BACKUP_FILE}"
exit 1
fi
echo "======================================"
echo "PostgreSQL Restore - $(date)"
echo "======================================"
echo "📦 Backup file: ${BACKUP_FILE}"
echo ""
# Wait for PostgreSQL to be ready
until PGPASSWORD="${POSTGRES_PASSWORD}" pg_isready -h "${POSTGRES_HOST}" -U "${POSTGRES_USER}" -d postgres; do
echo "⏳ Waiting for PostgreSQL..."
sleep 5
done
echo "✅ PostgreSQL is ready"
echo ""
# Warning
echo "⚠️ WARNING: This will DROP and RECREATE the database!"
echo "⚠️ Database: ${POSTGRES_DB}"
echo ""
echo "Press Ctrl+C to cancel, or wait 10 seconds to continue..."
sleep 10
echo ""
echo "🔄 Restoring database..."
# Restore backup
gunzip -c "${BACKUP_FILE}" | PGPASSWORD="${POSTGRES_PASSWORD}" psql \
-h "${POSTGRES_HOST}" \
-U "${POSTGRES_USER}" \
-d postgres
echo ""
echo "✅ Database restored successfully"
echo "======================================"

View File

@@ -0,0 +1,263 @@
# PostgreSQL Staging Stack - Staging Database
## Overview
Staging PostgreSQL 16 database stack with automated backup system for testing and development.
**Features**:
- PostgreSQL 16 Alpine (lightweight, secure)
- Automated daily backups with shorter retention (3 days)
- Performance-optimized configuration (1GB memory allocation)
- Health checks and automatic recovery
- Persistent storage with named volumes
- Isolated `postgres-staging-internal` network
- Resource limits optimized for staging workloads
## Services
- **postgres-staging** - PostgreSQL 16 database server
- **postgres-staging-backup** - Automated backup service with cron scheduling
## Prerequisites
1. **Docker and Docker Compose** installed
2. **Environment file** created (`.env`)
## Configuration
### 1. Create Environment File
```bash
cp .env.example .env
```
### 2. Generate Secure Password
```bash
openssl rand -base64 32
```
Update `.env`:
```env
POSTGRES_DB=michaelschiemer_staging
POSTGRES_USER=postgres
POSTGRES_PASSWORD=<generated-password>
BACKUP_RETENTION_DAYS=3
BACKUP_SCHEDULE=0 3 * * *
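# Cron format: minute hour day-of-month month day-of-week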
```
**Note**: Staging uses shorter backup retention (3 days) and runs backups at 3 AM (1 hour after production) to reduce resource contention.
## Deployment
### Initial Setup
```bash
# Create environment file
cp .env.example .env
# Generate and set password
openssl rand -base64 32
# Update POSTGRES_PASSWORD in .env
# Start services
docker compose up -d
# Check logs
docker compose logs -f
# Verify health
docker compose ps
```
### Verify Deployment
```bash
# Check PostgreSQL is running
docker exec postgres-staging pg_isready -U postgres -d michaelschiemer_staging
# Expected: postgres-staging:5432 - accepting connections
# Check backup service
docker compose logs postgres-staging-backup
# Expected: Initial backup completed successfully
```
## Integration with Staging Application Stack
The Staging Application Stack connects to this database via the `postgres-staging-internal` network.
**Connection Configuration** in `docker-compose.staging.yml`:
```env
DB_HOST=postgres-staging
DB_PORT=5432
DB_DATABASE=michaelschiemer_staging
DB_USERNAME=postgres
DB_PASSWORD=<same-as-postgres-staging-password>
```
**Network Connection**: The Staging Application Stack must be connected to the `postgres-staging-internal` network.
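A minimal sketch of that attachment (the service name `staging-app` is illustrative):

```yaml
services:
  staging-app:
    networks:
      - postgres-staging-internal

networks:
  postgres-staging-internal:
    external: true
    name: postgres-staging-internal
```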
## Usage
### Database Access
#### From Host Machine
```bash
# Connect to database
docker exec -it postgres-staging psql -U postgres -d michaelschiemer_staging
# Run SQL query
docker exec postgres-staging psql -U postgres -d michaelschiemer_staging -c "SELECT version();"
```
#### From Application Container
```bash
# Connection string format
postgresql://postgres:password@postgres-staging:5432/michaelschiemer_staging
```
### Backup Management
#### Manual Backup
```bash
# Trigger manual backup
docker exec postgres-staging-backup /scripts/backup.sh
# List backups
ls -lh backups/
# Example output:
# postgres_michaelschiemer_staging_20250130_030000.sql.gz
```
#### Restore from Backup
```bash
# List available backups
docker exec postgres-staging-backup ls -lh /backups
# Restore specific backup
docker exec -it postgres-staging-backup /scripts/restore.sh /backups/postgres_michaelschiemer_staging_20250130_030000.sql.gz
# ⚠️ WARNING: This will DROP and RECREATE the database!
```
## Network Isolation
This stack uses its own isolated network:
- **Network**: `postgres-staging-internal`
- **Purpose**: Isolate Staging database from Production and other services
- **Access**: Only services explicitly connected to this network can access the database
**Connecting Application Stack**:
```yaml
# In docker-compose.staging.yml
networks:
postgres-staging-internal:
external: true
name: postgres-staging-internal
```
## Security
### Network Isolation
- PostgreSQL only accessible via `postgres-staging-internal` network
- No external ports exposed
- Service-to-service communication only
### Authentication
- Strong password required (generated with `openssl rand -base64 32`)
- No default passwords
- Password stored in environment variables only
**Note**: Staging passwords can be different from Production, but should still be secure.
## Monitoring
### Health Checks
```bash
# Check service health
docker compose ps
# Expected: Both services "healthy"
# Manual health check
docker exec postgres-staging pg_isready -U postgres -d michaelschiemer_staging
```
### Resource Usage
```bash
# Database container stats
docker stats postgres-staging --no-stream
# Disk usage
docker exec postgres-staging du -sh /var/lib/postgresql/data
```
### Logs
```bash
# PostgreSQL logs
docker compose logs postgres-staging
# Backup logs
docker compose logs postgres-staging-backup
# Real-time monitoring
docker compose logs -f
```
## Troubleshooting
### Database Won't Start
```bash
# Check logs
docker compose logs postgres-staging
# Common issues:
# 1. Invalid configuration
docker exec postgres-staging postgres --check
# 2. Permission issues
docker exec postgres-staging ls -la /var/lib/postgresql/data
```
### Connection Refused from Application
```bash
# 1. Check PostgreSQL is running
docker compose ps postgres-staging
# 2. Verify network
docker network inspect postgres-staging-internal | grep postgres-staging
# 3. Check if application is connected to network
docker network inspect postgres-staging-internal | grep staging-app
```
## Differences from Production Stack
| Aspect | Production | Staging |
|--------|-----------|---------|
| **Container Name** | postgres-production | postgres-staging |
| **Network** | postgres-production-internal | postgres-staging-internal |
| **Volume** | postgres-production-data | postgres-staging-data |
| **Database** | michaelschiemer | michaelschiemer_staging |
| **Memory Limit** | 2GB | 1GB |
| **Backup Retention** | 7 days | 3 days |
| **Backup Schedule** | 2 AM | 3 AM |
## Additional Resources
- **PostgreSQL Documentation**: https://www.postgresql.org/docs/16/
- **Performance Tuning**: https://wiki.postgresql.org/wiki/Performance_Optimization
- **Backup Best Practices**: https://www.postgresql.org/docs/16/backup.html

View File

@@ -0,0 +1,70 @@
# PostgreSQL Configuration for Staging
# Note: tuning values copied from the production config (sized for a 2GB allocation); the staging container is limited to 1GB
# Connection Settings
listen_addresses = '*'
max_connections = 100
superuser_reserved_connections = 3
# Memory Settings (for 2GB allocation)
shared_buffers = 512MB
effective_cache_size = 1536MB
maintenance_work_mem = 128MB
work_mem = 5MB
# Checkpoint Settings
checkpoint_completion_target = 0.9
wal_buffers = 16MB
default_statistics_target = 100
# Query Planner
random_page_cost = 1.1
effective_io_concurrency = 200
# Write-Ahead Logging
wal_level = replica
max_wal_size = 2GB
min_wal_size = 1GB
# Logging
log_destination = 'stderr'
logging_collector = on
log_directory = 'log'
log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
log_rotation_age = 1d
log_rotation_size = 100MB
log_line_prefix = '%m [%p] %u@%d '
log_timezone = 'Europe/Berlin'
# What to Log
log_min_duration_statement = 1000
log_checkpoints = on
log_connections = on
log_disconnections = on
log_lock_waits = on
log_statement = 'ddl'
# Autovacuum
autovacuum = on
autovacuum_max_workers = 3
autovacuum_naptime = 1min
# Client Connection Defaults
datestyle = 'iso, mdy'
timezone = 'Europe/Berlin'
lc_messages = 'en_US.utf8'
lc_monetary = 'en_US.utf8'
lc_numeric = 'en_US.utf8'
lc_time = 'en_US.utf8'
default_text_search_config = 'pg_catalog.english'
# Lock Management
deadlock_timeout = 1s
# Statement Timeout (prevent long-running queries)
statement_timeout = 30000 # 30 seconds
# Parallel Query Execution
max_parallel_workers_per_gather = 2
max_parallel_workers = 4
max_worker_processes = 4

View File

@@ -0,0 +1,72 @@
services:
# PostgreSQL Staging Database
postgres-staging:
image: postgres:16-alpine
container_name: postgres-staging
restart: unless-stopped
networks:
- postgres-staging-internal
environment:
- TZ=Europe/Berlin
- POSTGRES_DB=${POSTGRES_DB:-michaelschiemer_staging}
- POSTGRES_USER=${POSTGRES_USER:-postgres}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- PGDATA=/var/lib/postgresql/data/pgdata
volumes:
- postgres-staging-data:/var/lib/postgresql/data
- ./conf.d:/etc/postgresql/conf.d:ro
- ./backups:/backups
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
command:
- postgres
- -c
- config_file=/etc/postgresql/conf.d/postgresql.conf
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-michaelschiemer_staging}"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
shm_size: 256mb
deploy:
resources:
limits:
memory: 1G
reservations:
memory: 256M
# Automated Backup Service for Staging
postgres-staging-backup:
image: postgres:16-alpine
container_name: postgres-staging-backup
restart: unless-stopped
networks:
- postgres-staging-internal
environment:
- TZ=Europe/Berlin
- POSTGRES_HOST=postgres-staging
- POSTGRES_DB=${POSTGRES_DB:-michaelschiemer_staging}
- POSTGRES_USER=${POSTGRES_USER:-postgres}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- BACKUP_RETENTION_DAYS=${BACKUP_RETENTION_DAYS:-3}
- BACKUP_SCHEDULE=${BACKUP_SCHEDULE:-0 3 * * *}
volumes:
- ./backups:/backups
- ./scripts:/scripts:ro
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
entrypoint: ["/scripts/backup-entrypoint.sh"]
depends_on:
postgres-staging:
condition: service_healthy
volumes:
postgres-staging-data:
name: postgres-staging-data
networks:
postgres-staging-internal:
name: postgres-staging-internal
driver: bridge

View File

@@ -0,0 +1,23 @@
#!/bin/sh
set -e
echo "🔄 PostgreSQL Backup Service Starting..."
echo "📅 Backup Schedule: ${BACKUP_SCHEDULE}"
echo "🗑️ Retention: ${BACKUP_RETENTION_DAYS} days"
# Install dcron for scheduled backups
apk add --no-cache dcron
# Create cron job
echo "${BACKUP_SCHEDULE} /scripts/backup.sh >> /var/log/backup.log 2>&1" > /etc/crontabs/root
# Ensure backup directory exists
mkdir -p /backups
# Run initial backup
echo "🚀 Running initial backup..."
/scripts/backup.sh
# Start cron in foreground
echo "✅ Backup service ready - cron daemon starting"
crond -f -l 2

View File

@@ -0,0 +1,55 @@
#!/bin/sh
set -e
# Configuration
BACKUP_DIR="/backups"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="${BACKUP_DIR}/postgres_${POSTGRES_DB}_${TIMESTAMP}.sql.gz"
RETENTION_DAYS=${BACKUP_RETENTION_DAYS:-7}
echo "======================================"
echo "PostgreSQL Backup - $(date)"
echo "======================================"
# Wait for PostgreSQL to be ready
until PGPASSWORD="${POSTGRES_PASSWORD}" pg_isready -h "${POSTGRES_HOST}" -U "${POSTGRES_USER}" -d "${POSTGRES_DB}"; do
  echo "⏳ Waiting for PostgreSQL..."
  sleep 5
done
echo "✅ PostgreSQL is ready"
# Create backup
echo "📦 Creating backup: ${BACKUP_FILE}"
PGPASSWORD="${POSTGRES_PASSWORD}" pg_dump \
  -h "${POSTGRES_HOST}" \
  -U "${POSTGRES_USER}" \
  -d "${POSTGRES_DB}" \
  --clean \
  --if-exists \
  --create \
  --no-owner \
  --no-privileges \
  | gzip > "${BACKUP_FILE}"
# Verify backup was created
if [ -f "${BACKUP_FILE}" ]; then
  BACKUP_SIZE=$(du -h "${BACKUP_FILE}" | cut -f1)
  echo "✅ Backup created successfully: ${BACKUP_SIZE}"
else
  echo "❌ Backup failed!"
  exit 1
fi
# Cleanup old backups
echo "🗑️ Cleaning up backups older than ${RETENTION_DAYS} days..."
find "${BACKUP_DIR}" -name "postgres_*.sql.gz" -type f -mtime +${RETENTION_DAYS} -delete
# List current backups
echo ""
echo "📊 Current backups:"
ls -lh "${BACKUP_DIR}"/postgres_*.sql.gz 2>/dev/null || echo "No backups found"
echo ""
echo "✅ Backup completed successfully"
echo "======================================"

View File

@@ -0,0 +1,55 @@
#!/bin/sh
set -e
# Configuration
BACKUP_DIR="/backups"
if [ -z "$1" ]; then
echo "Usage: ./restore.sh <backup-file>"
echo ""
echo "Available backups:"
ls -lh "${BACKUP_DIR}"/postgres_*.sql.gz 2>/dev/null || echo "No backups found"
exit 1
fi
BACKUP_FILE="$1"
if [ ! -f "${BACKUP_FILE}" ]; then
echo "❌ Backup file not found: ${BACKUP_FILE}"
exit 1
fi
echo "======================================"
echo "PostgreSQL Restore - $(date)"
echo "======================================"
echo "📦 Backup file: ${BACKUP_FILE}"
echo ""
# Wait for PostgreSQL to be ready
until PGPASSWORD="${POSTGRES_PASSWORD}" pg_isready -h "${POSTGRES_HOST}" -U "${POSTGRES_USER}" -d postgres; do
  echo "⏳ Waiting for PostgreSQL..."
  sleep 5
done
echo "✅ PostgreSQL is ready"
echo ""
# Warning
echo "⚠️ WARNING: This will DROP and RECREATE the database!"
echo "⚠️ Database: ${POSTGRES_DB}"
echo ""
echo "Press Ctrl+C to cancel, or wait 10 seconds to continue..."
sleep 10
echo ""
echo "🔄 Restoring database..."
# Restore backup
gunzip -c "${BACKUP_FILE}" | PGPASSWORD="${POSTGRES_PASSWORD}" psql \
-h "${POSTGRES_HOST}" \
-U "${POSTGRES_USER}" \
-d postgres
echo ""
echo "✅ Database restored successfully"
echo "======================================"

View File

@@ -49,8 +49,8 @@ docker build -t registry.michaelschiemer.de/michaelschiemer-app:latest .
docker push registry.michaelschiemer.de/michaelschiemer-app:latest
```
-### 5. Database Available
-Stack 5 (PostgreSQL/MySQL) must be running or external database configured.
+### 5. PostgreSQL Production Stack Available
+The separate `postgresql-production` stack must be running. See `deployment/stacks/postgresql-production/README.md` for setup.
## Configuration
@@ -278,10 +278,13 @@ docker compose pull app queue-worker scheduler
docker compose up -d --force-recreate app queue-worker scheduler
```
-### Stack 5: Database (PostgreSQL or MySQL)
+### Database Stack: PostgreSQL Production (Separate Stack)
**Connection**:
-- Database service must be on same Docker network or externally accessible
+- Production Application Stack connects to separate `postgresql-production` stack
+- Database container: `postgres-production`
+- Network: `postgres-production-internal`
+- Database service must be on same Docker network (`postgres-production-internal`)
- Connection configured via DB_HOST, DB_PORT, DB_NAME, DB_USER, DB_PASS
**Run Migrations**:

View File

@@ -0,0 +1,8 @@
# Redis Stack Environment Configuration
# Copy to .env and adjust for your environment
# Redis Password (REQUIRED in production)
REDIS_PASSWORD=your-secure-redis-password-here
# Timezone
TZ=Europe/Berlin

View File

@@ -0,0 +1,291 @@
# Redis Stack
External Redis Stack for the Custom PHP Framework application.
## Overview
This Redis Stack provides:
- **Cache Backend**: Application-level caching (sessions, query results, objects)
- **Queue Backend**: Background job queue storage
- **Session Storage**: User session persistence
- **Rate Limiting**: Request rate limiting storage
**Architecture Pattern**: Following the same external stack pattern as PostgreSQL for consistency and separation of concerns.
## Quick Start
```bash
# 1. Create environment configuration
cp .env.example .env
# 2. Set Redis password
echo "REDIS_PASSWORD=$(openssl rand -base64 32)" >> .env
# 3. Create app-internal network (if not exists)
docker network create app-internal
# 4. Start Redis Stack
docker compose up -d
# 5. Verify health
docker compose ps
docker compose exec redis-stack redis-cli ping
# Expected: PONG
```
## Configuration
### Environment Variables
```env
REDIS_PASSWORD=your-secure-password # Required for production
TZ=Europe/Berlin # Timezone for logs
```
### Redis Configuration
Redis configuration is in `redis.conf` with production-optimized settings:
**Persistence:**
- RDB Snapshots: 15min/5min/1min intervals
- AOF (Append Only File): `everysec` fsync policy
- Combined persistence for data durability
**Memory Management:**
- Max Memory: 512MB (configurable)
- Eviction Policy: `allkeys-lru` (Least Recently Used)
- Lazy freeing for async deletion
**Performance:**
- TCP keepalive: 300s
- Active rehashing enabled
- Optimized client output buffers
### Security
**Password Protection:**
```bash
# Set via Docker Secrets (recommended)
echo "your-redis-password" | docker secret create redis_password -
# Or via environment variable
REDIS_PASSWORD=your-password docker compose up -d
```
**Network Isolation:**
- Only accessible via `app-internal` Docker network
- Not exposed to public internet
- Protected mode enabled
## Health Monitoring
```bash
# Check Redis health
docker compose exec redis-stack redis-cli ping
# Get Redis info
docker compose exec redis-stack redis-cli info
# Monitor real-time activity
docker compose exec redis-stack redis-cli monitor
# Check memory usage
docker compose exec redis-stack redis-cli info memory
# View slow queries
docker compose exec redis-stack redis-cli slowlog get 10
```
## Integration with Application Stack
### Production Setup
```bash
# In application root directory
docker compose \
-f docker-compose.base.yml \
-f docker-compose.production.yml \
-f docker-compose.postgres-override.yml \
-f docker-compose.redis-override.yml \
up -d
```
### Application Environment Variables
```env
REDIS_HOST=redis-stack # Container name from this stack
REDIS_PORT=6379
REDIS_PASSWORD=your-password # Same as Redis Stack password
```
## Maintenance
### Backup
Redis persistence is handled automatically via:
- **RDB**: Snapshot backups in `/data/dump.rdb`
- **AOF**: Append-only files under `/data` (Redis 7 writes a multi-part AOF into the `appendonlydir/` subdirectory)
**Manual Backup:**
```bash
# Trigger immediate RDB snapshot
docker compose exec redis-stack redis-cli BGSAVE
# Copy backup files
docker cp redis-stack:/data/dump.rdb ./backups/redis-$(date +%Y%m%d).rdb
```
### Restore
```bash
# Stop Redis
docker compose stop redis-stack
# Restore backup
docker cp ./backups/redis-backup.rdb redis-stack:/data/dump.rdb
# Start Redis
docker compose start redis-stack
```
**Note:** With AOF enabled (as in `redis.conf`), Redis replays the append-only files on startup and a restored `dump.rdb` is ignored. To restore from an RDB snapshot, temporarily set `appendonly no`, or copy back the AOF files as well.
### Cleanup
```bash
# Clear specific database
docker compose exec redis-stack redis-cli -n 0 FLUSHDB
# Clear all databases (DANGEROUS!)
docker compose exec redis-stack redis-cli FLUSHALL
# Remove old AOF/RDB files
docker compose exec redis-stack sh -c "rm -f /data/*.aof.old /data/*.rdb.old"
```
## Performance Tuning
### Memory Optimization
```bash
# Check memory fragmentation
docker compose exec redis-stack redis-cli info memory | grep fragmentation
# Defragment if needed (Redis 4.0+)
docker compose exec redis-stack redis-cli MEMORY PURGE
```
### Connection Limits
Adjust `maxclients` in `redis.conf` based on your application needs:
```conf
maxclients 10000 # Default, increase if needed
```
### Persistence Trade-offs
**For Cache-Only Usage:**
```conf
# Disable persistence for maximum performance
save ""
appendonly no
```
**For Critical Data:**
```conf
# More frequent snapshots
save 300 1
save 60 10
appendfsync always # Slower but safest
```
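Either way, edits to `redis.conf` only take effect once the container re-reads the mounted file (a sketch):

```bash
# Re-create the container so it picks up the changed redis.conf
docker compose up -d --force-recreate redis-stack
docker compose exec redis-stack redis-cli config get appendonly
```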
## Troubleshooting
### Connection Issues
```bash
# Test connection from application container
docker exec php sh -c 'redis-cli -h redis-stack -a $REDIS_PASSWORD ping'
# Check network connectivity
docker network inspect app-internal
```
### Memory Issues
```bash
# Check current memory usage
docker compose exec redis-stack redis-cli info memory
# View evicted keys
docker compose exec redis-stack redis-cli info stats | grep evicted
# Increase memory limit (edit docker-compose.yml)
# deploy.resources.limits.memory: 1G
```
### Performance Issues
```bash
# Check slow queries
docker compose exec redis-stack redis-cli slowlog get 10
# Monitor commands in real-time
docker compose exec redis-stack redis-cli monitor
# Analyze key patterns
docker compose exec redis-stack redis-cli --bigkeys
```
## Architecture
### Network Topology
```
┌─────────────────────────────────────────┐
│ app-internal Network │
│ │
│ ┌──────────┐ ┌──────────────┐ │
│ │ PHP │────────▶│ redis-stack │ │
│ │ App │ │ (Cache) │ │
│ └──────────┘ └──────────────┘ │
│ │
│ ┌──────────┐ │
│ │ Queue │────────▶ │
│ │ Worker │ (Shared Redis) │
│ └──────────┘ │
│ │
└─────────────────────────────────────────┘
```
### Data Separation
**Database Indexes:**
- DB 0: Application cache (default)
- DB 1: Session storage
- DB 2: Queue backend
- DB 3: Rate limiting
- DB 4-15: Available for other uses
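Whether keys are landing in the expected logical database can be checked per index (a sketch, assuming the convention above; add `-a "$REDIS_PASSWORD"` once a password is set):

```bash
docker compose exec redis-stack redis-cli -n 1 dbsize     # session keys
docker compose exec redis-stack redis-cli -n 2 dbsize     # queued jobs
docker compose exec redis-stack redis-cli info keyspace   # all non-empty DBs at a glance
```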
## Best Practices
1. **Always use passwords** in production
2. **Monitor memory usage** - set appropriate `maxmemory` limit
3. **Enable persistence** for critical data (sessions, queue)
4. **Regular backups** if using Redis as primary data store
5. **Network isolation** - never expose Redis port publicly
6. **Health checks** - monitor Redis availability
7. **Resource limits** - set Docker memory limits
8. **Logging** - check Redis logs for issues
## Related Documentation
- [Application Docker Compose](../../docker-compose.production.yml)
- [Redis Override Configuration](../../docker-compose.redis-override.yml)
- [PostgreSQL Stack](../postgresql/README.md) (similar pattern)
- [Redis Official Documentation](https://redis.io/documentation)
## Support
For issues or questions:
- Check Redis logs: `docker compose logs redis-stack`
- Test connectivity: `docker compose exec redis-stack redis-cli ping`
- Review configuration: `docker compose exec redis-stack redis-cli config get '*'`

View File

@@ -0,0 +1,37 @@
services:
  # Redis Cache & Queue Backend
  redis-stack:
    image: redis:7-alpine
    container_name: redis-stack
    restart: unless-stopped
    networks:
      - app-internal
    environment:
      - TZ=Europe/Berlin
    volumes:
      - redis-data:/data
      - ./redis.conf:/usr/local/etc/redis/redis.conf:ro
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    command: >
      redis-server /usr/local/etc/redis/redis.conf
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 512M
        reservations:
          memory: 128M

volumes:
  redis-data:
    name: redis-data

networks:
  app-internal:
    external: true

View File

@@ -0,0 +1,81 @@
# Redis Configuration for Production Stack
# Based on Redis 7.x best practices
# Network Configuration
bind 0.0.0.0
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
# General Settings
daemonize no
supervised no
pidfile /var/run/redis.pid
loglevel notice
databases 16
# Persistence Configuration
# RDB Snapshots
# After 900 sec (15 min) if at least 1 key changed
save 900 1
# After 300 sec (5 min) if at least 10 keys changed
save 300 10
# After 60 sec if at least 10000 keys changed
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data
# AOF Persistence (Append Only File)
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# Memory Management
maxmemory 512mb
maxmemory-policy allkeys-lru
maxmemory-samples 5
# Lazy Freeing (async deletion)
lazyfree-lazy-eviction yes
lazyfree-lazy-expire yes
lazyfree-lazy-server-del yes
replica-lazy-flush yes
# Security
# NOTE: Password should be set via REDIS_PASSWORD environment variable
# requirepass will be set via redis-cli CONFIG SET after startup
protected-mode yes
# Limits
maxclients 10000
# Slow Log
slowlog-log-slower-than 10000
slowlog-max-len 128
# Advanced Config
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# Performance Tuning
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Disable dangerous commands (optional)
# rename-command FLUSHDB ""
# rename-command FLUSHALL ""
# rename-command CONFIG ""
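Because `requirepass` is applied at runtime rather than in this file (see the security note above), a post-start step is needed (a sketch; `$REDIS_PASSWORD` comes from the stack's `.env`):

```bash
# Set the password on the running instance. This is NOT persisted across
# restarts: CONFIG REWRITE cannot help here, since the file is mounted read-only.
docker compose exec redis-stack redis-cli config set requirepass "$REDIS_PASSWORD"
docker compose exec redis-stack redis-cli -a "$REDIS_PASSWORD" ping   # expect PONG
```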

View File

@@ -0,0 +1,2 @@
admin:$2y$05$frHsEHd3NmM.LUMZ86dJe.aAZK5D5Qoh1w0mnXY0pfbbFmtU8aD5y
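For reference, an entry in this bcrypt format can be generated with `htpasswd` from apache2-utils/httpd-tools (a sketch; username and password are placeholders):

```bash
# -n prints to stdout, -b takes the password as an argument, -B selects bcrypt
htpasswd -nbB admin 'change-me'
```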

View File

@@ -24,11 +24,17 @@ Separate staging environment for testing features before production deployment.
## Prerequisites
1. **Traefik Stack Running** (shared with production)
-2. **DNS Configuration**: `staging.michaelschiemer.de` points to server IP
-3. **Docker Networks**:
+2. **PostgreSQL Staging Stack Running** (separate from production)
+   ```bash
+   cd ../postgresql-staging
+   docker compose up -d
+   ```
+3. **DNS Configuration**: `staging.michaelschiemer.de` points to server IP
+4. **Docker Networks**:
```bash
docker network create traefik-public # Shared with production
docker network create staging-internal # Staging-only
+docker network create postgres-staging-internal # Staging database network
```
## Configuration
@@ -47,9 +53,9 @@ APP_ENV=staging
APP_DEBUG=true
APP_URL=https://staging.michaelschiemer.de
-# Database (can use separate staging DB or share)
+# Database - using separate staging database stack
DB_DATABASE=michaelschiemer_staging
-DB_HOST=postgres
+DB_HOST=postgres-staging
DB_PORT=5432
DB_USERNAME=postgres
DB_PASSWORD=<password>
@@ -124,7 +130,8 @@ docker compose logs -f staging-nginx
| **APP_ENV** | production | staging |
| **APP_DEBUG** | false | true |
| **Redis** | Separate instance | Separate instance |
-| **Database** | michaelschiemer | michaelschiemer_staging |
+| **Database** | michaelschiemer (postgres-production) | michaelschiemer_staging (postgres-staging) |
+| **Database Stack** | postgresql-production (separate) | postgresql-staging (separate) |
| **Volumes** | app-* | staging-* |
| **Network** | app-internal | staging-internal |

View File

@@ -11,10 +11,9 @@ Traefik acts as the central reverse proxy for all services, handling:
## Services
-- **traefik.michaelschiemer.de** - Traefik Dashboard (VPN-only + BasicAuth protected)
-  - **Only reachable via WireGuard VPN** (10.8.0.0/24)
-  - Additionally protected by BasicAuth
-  - Public access is blocked
+- **traefik.michaelschiemer.de** - Traefik Dashboard (BasicAuth protected)
+  - Protected by BasicAuth authentication
+  - Accessible via HTTPS with Let's Encrypt certificate
## Local Development
@@ -33,7 +32,8 @@ docker compose -f docker-compose.local.yml up -d
# Check logs
docker compose -f docker-compose.local.yml logs -f
-# Access dashboard at http://localhost:8080/dashboard/
+# Access dashboard at http://localhost:8093/dashboard/
+# Note: Dashboard is served on the API port (8093) when api.insecure=true
```
### Local Development Configuration
@@ -41,11 +41,12 @@ docker compose -f docker-compose.local.yml logs -f
The local configuration (`docker-compose.local.yml` and `traefik.local.yml`) differs from production:
- **Bridge network** instead of `host` mode (avoids port conflicts)
-- **Port mappings**: `8080:80` only (HTTP-only for local development)
+- **Port mappings**: `8081:80` (HTTP) and `8093:8080` (API/Dashboard)
+  - Note: HTTPS not needed locally - avoids port conflicts with web container (8443:443)
- **HTTP-only** (no ACME/Let's Encrypt) for local development
-- **Dashboard**: Accessible at `http://localhost:8080/dashboard/` (HTTP, no authentication)
-  - Also available: `http://localhost:8080/api/rawdata` and `http://localhost:8080/api/http/routers`
+- **Dashboard**: Accessible at `http://localhost:8093/dashboard/` (HTTP, no authentication)
+  - Also available: `http://localhost:8093/api/rawdata` and `http://localhost:8093/api/http/routers`
+  - Note: Dashboard is served on the API port (8093) when `api.insecure=true` in `traefik.local.yml`
- **No `acme.json`** required
- **Console logging** (human-readable) instead of JSON file logs
@@ -53,18 +54,18 @@ The local configuration (`docker-compose.local.yml` and `traefik.local.yml`) dif
| Feature | Local (`docker-compose.local.yml`) | Production (`docker-compose.yml`) |
|---------|-----------------------------------|----------------------------------|
-| Network Mode | Bridge | Host |
-| Ports | 8080:80 (HTTP only) | Direct binding (80, 443) |
+| Network Mode | Bridge | Bridge |
+| Ports | 8081:80, 8093:8080 (HTTP only) | 80:80, 443:443, 2222:2222 |
| SSL/TLS | HTTP-only | HTTPS with Let's Encrypt |
-| Dashboard | `http://localhost:8080/dashboard/` | `https://traefik.michaelschiemer.de` |
-| Authentication | None (local dev) | VPN + BasicAuth |
+| Dashboard | `http://localhost:8093/dashboard/` | `https://traefik.michaelschiemer.de` |
+| Authentication | None (local dev) | BasicAuth |
| Logging | Console (human-readable) | JSON files |
| ACME | Disabled | Enabled |
### Troubleshooting Local Development
**Container restarts in loop:**
-- Check if port 8080 is already in use: `netstat -tlnp | grep ':8080' || ss -tlnp | grep ':8080'`
+- Check if ports 8081 or 8093 are already in use: `netstat -tlnp | grep -E ':(8081|8093)' || ss -tlnp | grep -E ':(8081|8093)'`
- Verify Docker network exists: `docker network ls | grep traefik-public`
- Check logs: `docker compose -f docker-compose.local.yml logs -f traefik`

View File

@@ -3,7 +3,7 @@
#
# This configuration is optimized for local development:
# - Bridge network instead of host mode
-# - Port mapping: 8080:80 (HTTP only - HTTPS not needed for local dev)
+# - Port mappings: 8081:80 (HTTP) and 8093:8080 (API/Dashboard)
# Note: 8443:443 is used by the web container, and we don't need HTTPS for Traefik locally
# - No ACME/Let's Encrypt (HTTP-only)
# - Simplified healthcheck
@@ -24,6 +24,9 @@ services:
- "8093:8080" # Traefik API entrypoint (for api.insecure=true dashboard)
environment:
- TZ=Europe/Berlin
+    command:
+      # Load static configuration file
+      - "--configFile=/traefik.yml"
volumes:
# Docker socket for service discovery
- /var/run/docker.sock:/var/run/docker.sock:ro
@@ -42,10 +45,8 @@ services:
# For now, we'll try without labels and see if api.insecure=true works directly.
- "traefik.enable=true"
healthcheck:
-      # Use wget or curl to check Traefik ping endpoint
-      # The ping endpoint is configured in traefik.local.yml on the 'web' entrypoint
-      # Try ping endpoint first, if that fails, try API endpoint
-      test: ["CMD-SHELL", "wget --quiet --spider http://localhost:80/ping || wget --quiet --spider http://localhost:80/api/rawdata || exit 1"]
+      # Use Traefik's built-in healthcheck command (works in minimal image)
+      test: ["CMD", "traefik", "healthcheck", "--ping"]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -0,0 +1,17 @@
# Gitea Router Configuration
# Router definition for Gitea using File Provider
http:
  routers:
    gitea:
      rule: Host(`git.michaelschiemer.de`)
      entryPoints:
        - websecure
      service: gitea
      tls:
        certResolver: letsencrypt
      middlewares:
        - security-headers-global
        - gzip-compression
      priority: 100
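After Traefik reloads the file provider, the route can be verified end to end (a sketch; assumes the Traefik container is named `traefik`):

```bash
# Expect an HTTP status from Gitea rather than a Traefik 404/504
curl -sS -o /dev/null -w '%{http_code}\n' https://git.michaelschiemer.de
# Confirm the router was picked up
docker logs traefik 2>&1 | grep -i gitea | tail -n 5
```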

View File

@@ -1,5 +1,7 @@
# TCP Router Configuration for Gitea SSH
# Routes SSH traffic on port 2222 to Gitea container
+# Note: Gitea SSH is handled via port mapping in docker-compose.yml
+# This file is kept for reference but may not be needed
tcp:
  routers:
@@ -13,8 +15,5 @@ tcp:
  gitea-ssh-service:
    loadBalancer:
      servers:
-        # Gitea container SSH service
-        # Note: Using container IP from gitea_gitea-internal network
-        # Traefik runs in host network mode, so we need the actual container IP
-        # IP address: 172.23.0.3 (gitea_gitea-internal network)
-        - address: "172.23.0.3:22"
+        # Use container name in bridge network mode
+        - address: "gitea:22"

View File

@@ -1,20 +0,0 @@
# Gitea configuration is now handled via Docker labels in docker-compose.yml
# This file is kept for reference but is not used
# Traefik will automatically discover Gitea via Docker labels and use the container IP
# when running in host network mode
#
# http:
#   routers:
#     gitea:
#       rule: Host(`git.michaelschiemer.de`)
#       entrypoints:
#         - websecure
#       service: gitea
#       tls:
#         certResolver: letsencrypt
#       priority: 100
#   services:
#     gitea:
#       loadBalancer:
#         servers:
#           - url: http://gitea:3000

View File

@@ -43,14 +43,6 @@ http:
average: 200
burst: 100
period: 1s
-# IP whitelist for admin services (example)
-# Uncomment and adjust for production
-# admin-whitelist:
-#   ipWhiteList:
-#     sourceRange:
-#       - "127.0.0.1/32"
-#       - "10.0.0.0/8"
# Chain multiple middlewares
default-chain:
@@ -65,4 +57,3 @@ http:
- security-headers-global
- gzip-compression
- rate-limit-strict
-# - admin-whitelist # Uncomment for IP whitelisting