feat: update deployment configuration and encrypted env loader

- Update Ansible playbooks and roles for application deployment
- Add new Gitea/Traefik troubleshooting playbooks
- Update Docker Compose configurations (base, local, staging, production)
- Enhance EncryptedEnvLoader with improved error handling
- Add deployment scripts (autossh setup, migration, secret testing)
- Update CI/CD workflows and documentation
- Add Semaphore stack configuration
This commit is contained in:
2025-11-02 20:38:06 +01:00
parent 7b7f0b41d2
commit 24cbbccf4c
44 changed files with 5280 additions and 276 deletions

View File

@@ -123,12 +123,22 @@ jobs:
fi
if [ -z "$CHANGED_FILES" ] && [ "$FORCE" != "true" ]; then
# No diff information available; fall back to building to stay safe
echo "⚠️ Keine Änderungsinformation gefunden bilde Image sicherheitshalber."
echo "needs_build=true" >> "$GITHUB_OUTPUT"
echo "changed_files=<none>" >> "$GITHUB_OUTPUT"
echo "needs_runtime_build=true" >> "$GITHUB_OUTPUT"
exit 0
# No diff information available; assume no build needed if this is not initial commit
# Only skip if we can detect this is not the first commit
if git rev-parse HEAD^ >/dev/null 2>&1; then
echo " Keine Änderungsinformation gefunden, aber HEAD^ existiert überspringe Build."
echo "needs_build=false" >> "$GITHUB_OUTPUT"
echo "changed_files=<none>" >> "$GITHUB_OUTPUT"
echo "needs_runtime_build=false" >> "$GITHUB_OUTPUT"
exit 0
else
# First commit or detached state - build to be safe
echo "⚠️ Keine Änderungsinformation gefunden bilde Image sicherheitshalber."
echo "needs_build=true" >> "$GITHUB_OUTPUT"
echo "changed_files=<none>" >> "$GITHUB_OUTPUT"
echo "needs_runtime_build=true" >> "$GITHUB_OUTPUT"
exit 0
fi
fi
NEEDS_BUILD=true
@@ -160,6 +170,8 @@ jobs:
SUMMARY="Nur Doku-/Teständerungen Container-Build wird übersprungen"
elif [ "$NEEDS_BUILD" = "false" ] && [ "$OTHER_NON_IGNORED" = "true" ]; then
SUMMARY="Keine Build-Trigger gefunden Container-Build wird übersprungen"
elif [ "$NEEDS_BUILD" = "true" ]; then
SUMMARY="Runtime-relevante Änderungen erkannt Container-Build wird ausgeführt"
fi
else
RUNTIME_BUILD=true
@@ -187,7 +199,7 @@ jobs:
runtime-base:
name: Build Runtime Base Image
needs: changes
if: always()
if: needs.changes.outputs.needs_runtime_build == 'true'
runs-on: docker-build
outputs:
image_ref: ${{ steps.set-result.outputs.image_ref }}
@@ -396,6 +408,7 @@ jobs:
echo "image_ref=$TARGET_REGISTRY/$RUNTIME_IMAGE_NAME:latest" >> "$GITHUB_OUTPUT"
echo "built=true" >> "$GITHUB_OUTPUT"
else
# When runtime build is skipped, output empty but build job will use default latest image
echo "image_ref=" >> "$GITHUB_OUTPUT"
echo "built=false" >> "$GITHUB_OUTPUT"
fi
@@ -727,6 +740,24 @@ jobs:
echo " Image: $IMAGE_NAME"
echo " Tags: latest, $TAG, git-$SHORT_SHA"
# Build cache sources - branch-specific and general caches
CACHE_SOURCES=(
"type=registry,ref=${CACHE_TARGET}/${IMAGE_NAME}:buildcache"
"type=registry,ref=${REGISTRY_TO_USE}/${IMAGE_NAME}:latest"
"type=registry,ref=${REGISTRY_TO_USE}/${IMAGE_NAME}:${REF_NAME}-cache"
)
# If this is not the first build, try to use previous commit's tag as cache
if git rev-parse HEAD^ >/dev/null 2>&1; then
PREV_SHORT_SHA=$(git rev-parse --short=7 HEAD^)
CACHE_SOURCES+=("type=registry,ref=${REGISTRY_TO_USE}/${IMAGE_NAME}:git-${PREV_SHORT_SHA}")
fi
CACHE_FROM_ARGS=""
for CACHE_SRC in "${CACHE_SOURCES[@]}"; do
CACHE_FROM_ARGS="${CACHE_FROM_ARGS} --cache-from ${CACHE_SRC}"
done
docker buildx build \
--platform linux/amd64 \
--file ./Dockerfile.production \
@@ -734,9 +765,9 @@ jobs:
--tag "${REGISTRY_TO_USE}/${IMAGE_NAME}:latest" \
--tag "${REGISTRY_TO_USE}/${IMAGE_NAME}:${TAG}" \
--tag "${REGISTRY_TO_USE}/${IMAGE_NAME}:git-${SHORT_SHA}" \
--cache-from type=registry,ref="${CACHE_TARGET}/${IMAGE_NAME}:buildcache" \
--cache-from type=registry,ref="${REGISTRY_TO_USE}/${IMAGE_NAME}:latest" \
${CACHE_FROM_ARGS} \
--cache-to type=registry,ref="${CACHE_TARGET}/${IMAGE_NAME}:buildcache",mode=max \
--cache-to type=registry,ref="${REGISTRY_TO_USE}/${IMAGE_NAME}:${REF_NAME}-cache",mode=max \
--build-arg BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') \
--build-arg GIT_COMMIT=${COMMIT_SHA} \
--build-arg GIT_BRANCH=${REF_NAME} \
@@ -787,7 +818,9 @@ jobs:
deploy-staging:
name: Auto-deploy to Staging
needs: [changes, build, runtime-base]
if: github.ref_name == 'staging' || github.head_ref == 'staging' || (github.ref_name == '' && contains(github.ref, 'staging'))
if: |
(github.ref_name == 'staging' || github.head_ref == 'staging' || (github.ref_name == '' && contains(github.ref, 'staging'))) &&
(needs.build.result == 'success' || needs.build.result == 'skipped')
runs-on: ubuntu-latest
environment:
name: staging
@@ -952,21 +985,29 @@ jobs:
fi
fi
# If docker-compose.yml doesn't exist, it will be created from repo
if [ ! -f docker-compose.yml ]; then
echo "⚠️ docker-compose.yml not found, copying from repo..."
cp /workspace/repo/deployment/stacks/staging/docker-compose.yml . || {
echo "❌ Failed to copy docker-compose.yml"
# Copy base and staging docker-compose files if they don't exist
if [ ! -f docker-compose.base.yml ]; then
echo "⚠️ docker-compose.base.yml not found, copying from repo..."
cp /workspace/repo/docker-compose.base.yml . || {
echo "❌ Failed to copy docker-compose.base.yml"
exit 1
}
fi
# Update docker-compose.yml with new image tag
echo "📝 Updating docker-compose.yml..."
sed -i "s|image:.*/${IMAGE_NAME}:.*|image: ${DEPLOY_IMAGE}|g" docker-compose.yml
if [ ! -f docker-compose.staging.yml ]; then
echo "⚠️ docker-compose.staging.yml not found, copying from repo..."
cp /workspace/repo/docker-compose.staging.yml . || {
echo "❌ Failed to copy docker-compose.staging.yml"
exit 1
}
fi
echo "✅ Updated docker-compose.yml:"
grep "image:" docker-compose.yml | head -5
# Update docker-compose.staging.yml with new image tag
echo "📝 Updating docker-compose.staging.yml with new image tag..."
sed -i "s|image:.*/${IMAGE_NAME}:.*|image: ${DEPLOY_IMAGE}|g" docker-compose.staging.yml
echo "✅ Updated docker-compose.staging.yml:"
grep "image:" docker-compose.staging.yml | head -5
# Ensure networks exist
echo "🔗 Ensuring Docker networks exist..."
@@ -974,7 +1015,8 @@ jobs:
docker network create staging-internal 2>/dev/null || true
echo "🔄 Starting/updating services..."
docker compose up -d --pull always --force-recreate || {
# Use --pull missing instead of --pull always since we already pulled the specific image
docker compose -f docker-compose.base.yml -f docker-compose.staging.yml up -d --pull missing --force-recreate || {
echo "❌ Failed to start services"
exit 1
}
@@ -982,27 +1024,32 @@ jobs:
echo "⏳ Waiting for services to start..."
sleep 15
# Force containers to pull latest code from Git repository
echo "🔄 Pulling latest code from Git repository in staging-app container..."
docker compose exec -T staging-app bash -c "cd /var/www/html && git -c safe.directory=/var/www/html fetch origin staging && git -c safe.directory=/var/www/html reset --hard origin/staging && git -c safe.directory=/var/www/html clean -fd" || echo "⚠️ Git pull failed, container will sync on next restart"
# Pull latest code from Git repository only if image was actually rebuilt
# Skip if build was skipped (no changes detected) - container already has latest code
if [ "${{ needs.build.result }}" = "success" ] && [ -n "${{ needs.build.outputs.image_url }}" ] && [ "${{ needs.build.outputs.image_url }}" != "null" ]; then
echo "🔄 Pulling latest code from Git repository in staging-app container (image was rebuilt)..."
docker compose -f docker-compose.base.yml -f docker-compose.staging.yml exec -T staging-app bash -c "cd /var/www/html && git -c safe.directory=/var/www/html fetch origin staging && git -c safe.directory=/var/www/html reset --hard origin/staging && git -c safe.directory=/var/www/html clean -fd" || echo "⚠️ Git pull failed, container will sync on next restart"
else
echo " Skipping Git pull - no new image built, container already has latest code"
fi
# Also trigger a restart to ensure entrypoint script runs
echo "🔄 Restarting staging-app to ensure all services are up-to-date..."
docker compose restart staging-app || echo "⚠️ Failed to restart staging-app"
docker compose -f docker-compose.base.yml -f docker-compose.staging.yml restart staging-app || echo "⚠️ Failed to restart staging-app"
# Fix nginx upstream configuration - critical fix for 502 errors
# sites-available/default uses 127.0.0.1:9000 but PHP-FPM runs in staging-app container
echo "🔧 Fixing nginx PHP-FPM upstream configuration (post-deploy fix)..."
sleep 5
docker compose exec -T staging-nginx sed -i '/upstream php-upstream {/,/}/s|server 127.0.0.1:9000;|server staging-app:9000;|g' /etc/nginx/sites-available/default || echo "⚠️ Upstream fix (127.0.0.1) failed"
docker compose exec -T staging-nginx sed -i '/upstream php-upstream {/,/}/s|server localhost:9000;|server staging-app:9000;|g' /etc/nginx/sites-available/default || echo "⚠️ Upstream fix (localhost) failed"
docker compose exec -T staging-nginx nginx -t && docker compose restart staging-nginx || echo "⚠️ Nginx config test or restart failed"
docker compose -f docker-compose.base.yml -f docker-compose.staging.yml exec -T staging-nginx sed -i '/upstream php-upstream {/,/}/s|server 127.0.0.1:9000;|server staging-app:9000;|g' /etc/nginx/sites-available/default || echo "⚠️ Upstream fix (127.0.0.1) failed"
docker compose -f docker-compose.base.yml -f docker-compose.staging.yml exec -T staging-nginx sed -i '/upstream php-upstream {/,/}/s|server localhost:9000;|server staging-app:9000;|g' /etc/nginx/sites-available/default || echo "⚠️ Upstream fix (localhost) failed"
docker compose -f docker-compose.base.yml -f docker-compose.staging.yml exec -T staging-nginx nginx -t && docker compose -f docker-compose.base.yml -f docker-compose.staging.yml restart staging-nginx || echo "⚠️ Nginx config test or restart failed"
echo "✅ Nginx configuration fixed and reloaded"
echo "⏳ Waiting for services to stabilize..."
sleep 10
echo "📊 Container status:"
docker compose ps
docker compose -f docker-compose.base.yml -f docker-compose.staging.yml ps
echo "✅ Staging deployment completed!"
EOF
@@ -1137,15 +1184,33 @@ jobs:
exit 1
}
echo "📝 Updating docker-compose.yml..."
sed -i "s|image:.*/${IMAGE_NAME}:.*|image: ${FULL_IMAGE}|g" docker-compose.yml
sed -i "s|image:.*/${IMAGE_NAME}@.*|image: ${FULL_IMAGE}|g" docker-compose.yml
# Copy base and production docker-compose files if they don't exist
if [ ! -f docker-compose.base.yml ]; then
echo "⚠️ docker-compose.base.yml not found, copying from repo..."
cp /workspace/repo/docker-compose.base.yml . || {
echo "❌ Failed to copy docker-compose.base.yml"
exit 1
}
fi
echo "✅ Updated docker-compose.yml:"
grep "image:" docker-compose.yml | head -5
if [ ! -f docker-compose.production.yml ]; then
echo "⚠️ docker-compose.production.yml not found, copying from repo..."
cp /workspace/repo/docker-compose.production.yml . || {
echo "❌ Failed to copy docker-compose.production.yml"
exit 1
}
fi
echo "📝 Updating docker-compose.production.yml with new image tag..."
sed -i "s|image:.*/${IMAGE_NAME}:.*|image: ${FULL_IMAGE}|g" docker-compose.production.yml
sed -i "s|image:.*/${IMAGE_NAME}@.*|image: ${FULL_IMAGE}|g" docker-compose.production.yml
echo "✅ Updated docker-compose.production.yml:"
grep "image:" docker-compose.production.yml | head -5
echo "🔄 Restarting services..."
docker compose up -d --pull always --force-recreate || {
# Use --pull missing instead of --pull always since we already pulled the specific image
docker compose -f docker-compose.base.yml -f docker-compose.production.yml up -d --pull missing --force-recreate || {
echo "❌ Failed to restart services"
exit 1
}
@@ -1154,7 +1219,7 @@ jobs:
sleep 10
echo "📊 Container status:"
docker compose ps
docker compose -f docker-compose.base.yml -f docker-compose.production.yml ps
echo "✅ Production deployment completed!"
EOF

View File

@@ -41,6 +41,7 @@ jobs:
fi
echo "target_ref=$TARGET" >> "$GITHUB_OUTPUT"
echo "TARGET_REF=$TARGET" >> $GITHUB_ENV
echo "BRANCH_NAME=$TARGET" >> $GITHUB_ENV
- name: Download CI helpers
shell: bash
@@ -173,14 +174,28 @@ jobs:
IMAGE_NAME="${{ env.RUNTIME_IMAGE_NAME }}"
DATE_TAG="warm-$(date -u +%Y%m%d%H%M)"
BRANCH_NAME="${{ env.BRANCH_NAME || 'main' }}"
# Build cache sources - multiple sources for better cache hit rate
CACHE_SOURCES=(
"type=registry,ref=${TARGET_REGISTRY}/${IMAGE_NAME}:buildcache"
"type=registry,ref=${TARGET_REGISTRY}/${IMAGE_NAME}:${BRANCH_NAME}-cache"
"type=registry,ref=${TARGET_REGISTRY}/${IMAGE_NAME}:latest"
)
CACHE_FROM_ARGS=""
for CACHE_SRC in "${CACHE_SOURCES[@]}"; do
CACHE_FROM_ARGS="${CACHE_FROM_ARGS} --cache-from ${CACHE_SRC}"
done
docker buildx build \
--platform linux/amd64 \
--file ./Dockerfile.production \
--target runtime-base \
--build-arg RUNTIME_IMAGE=runtime-base \
--cache-from type=registry,ref="$TARGET_REGISTRY/$IMAGE_NAME:buildcache" \
--cache-to type=registry,ref="$TARGET_REGISTRY/$IMAGE_NAME:buildcache",mode=max \
${CACHE_FROM_ARGS} \
--cache-to type=registry,ref="${TARGET_REGISTRY}/${IMAGE_NAME}:buildcache",mode=max \
--cache-to type=registry,ref="${TARGET_REGISTRY}/${IMAGE_NAME}:${BRANCH_NAME}-cache",mode=max \
--tag "$TARGET_REGISTRY/$IMAGE_NAME:$DATE_TAG" \
--push \
.
@@ -201,6 +216,7 @@ jobs:
IMAGE_NAME="${{ env.IMAGE_NAME }}"
DATE_TAG="warm-$(date -u +%Y%m%d%H%M)"
BRANCH_NAME="${{ env.BRANCH_NAME || 'main' }}"
DEFAULT_RUNTIME="$CACHE_TARGET/${{ env.RUNTIME_IMAGE_NAME }}:latest"
RUNTIME_ARG="runtime-base"
@@ -208,12 +224,25 @@ jobs:
RUNTIME_ARG="$DEFAULT_RUNTIME"
fi
# Build cache sources - multiple sources for better cache hit rate
CACHE_SOURCES=(
"type=registry,ref=${CACHE_TARGET}/${IMAGE_NAME}:buildcache"
"type=registry,ref=${REGISTRY_TO_USE}/${IMAGE_NAME}:${BRANCH_NAME}-cache"
"type=registry,ref=${REGISTRY_TO_USE}/${IMAGE_NAME}:latest"
)
CACHE_FROM_ARGS=""
for CACHE_SRC in "${CACHE_SOURCES[@]}"; do
CACHE_FROM_ARGS="${CACHE_FROM_ARGS} --cache-from ${CACHE_SRC}"
done
docker buildx build \
--platform linux/amd64 \
--file ./Dockerfile.production \
--build-arg RUNTIME_IMAGE="$RUNTIME_ARG" \
--cache-from type=registry,ref="$CACHE_TARGET/$IMAGE_NAME:buildcache" \
--cache-to type=registry,ref="$CACHE_TARGET/$IMAGE_NAME:buildcache",mode=max \
${CACHE_FROM_ARGS} \
--cache-to type=registry,ref="${CACHE_TARGET}/${IMAGE_NAME}:buildcache",mode=max \
--cache-to type=registry,ref="${REGISTRY_TO_USE}/${IMAGE_NAME}:${BRANCH_NAME}-cache",mode=max \
--tag "$REGISTRY_TO_USE/$IMAGE_NAME:$DATE_TAG" \
--push \
.

View File

@@ -15,9 +15,6 @@ on:
- main
- staging
env:
CACHE_DIR: /tmp/composer-cache
jobs:
tests:
name: Run Tests & Quality Checks
@@ -77,23 +74,27 @@ jobs:
cd /workspace/repo
- name: Restore Composer cache
- name: Get Composer cache directory
id: composer-cache
shell: bash
run: |
if [ -d "$CACHE_DIR/vendor" ]; then
echo "📦 Restore composer dependencies"
cp -r "$CACHE_DIR/vendor" /workspace/repo/vendor || true
fi
echo "dir=$(composer global config cache-dir 2>/dev/null | cut -d' ' -f3 || echo "$HOME/.composer/cache")" >> $GITHUB_OUTPUT
- name: Cache Composer dependencies
uses: actions/cache@v4
with:
path: |
${{ steps.composer-cache.outputs.dir }}
vendor/
key: ${{ runner.os }}-composer-${{ hashFiles('**/composer.lock') }}
restore-keys: |
${{ runner.os }}-composer-
- name: Install PHP dependencies
run: |
cd /workspace/repo
composer install --no-interaction --prefer-dist --optimize-autoloader --ignore-platform-req=php
- name: Save Composer cache
run: |
mkdir -p "$CACHE_DIR"
cp -r /workspace/repo/vendor "$CACHE_DIR/vendor" || true
- name: PHPStan (baseline)
run: |
cd /workspace/repo
@@ -104,6 +105,42 @@ jobs:
cd /workspace/repo
make cs || echo "⚠️ php-cs-fixer dry run issues detected"
- name: Validate .env.base for secrets
run: |
cd /workspace/repo
if [ -f .env.base ]; then
echo "🔍 Checking .env.base for secrets..."
# Check for potential secrets (case-insensitive)
if grep -qiE "(password|secret|key|token|encryption|vault)" .env.base | grep -v "^#" | grep -v "FILE=" | grep -v "^$$" > /dev/null; then
echo "::error::.env.base contains potential secrets! Secrets should be in .env.local or Docker Secrets."
echo "⚠️ Found potential secrets in .env.base:"
grep -iE "(password|secret|key|token|encryption|vault)" .env.base | grep -v "^#" | grep -v "FILE=" | grep -v "^$$" || true
echo ""
echo "💡 Move secrets to:"
echo " - .env.local (for local development)"
echo " - Docker Secrets (for production/staging)"
exit 1
else
echo "✅ .env.base does not contain secrets"
fi
else
echo " .env.base not found (optional during migration)"
fi
echo ""
echo "🔍 Checking docker-compose.base.yml for hardcoded passwords..."
if grep -E "(PASSWORD|SECRET|TOKEN).*:-[^}]*[^}]}" docker-compose.base.yml 2>/dev/null | grep -v "^#" | grep -v "FILE=" > /dev/null; then
echo "::error::docker-compose.base.yml contains hardcoded password fallbacks! Passwords must be set explicitly."
echo "⚠️ Found hardcoded password fallbacks:"
grep -E "(PASSWORD|SECRET|TOKEN).*:-[^}]*[^}]}" docker-compose.base.yml | grep -v "^#" | grep -v "FILE=" || true
echo ""
echo "💡 Remove fallback values (:-...) from base file"
echo " Passwords must be set in .env.local or via Docker Secrets"
exit 1
else
echo "✅ docker-compose.base.yml does not contain hardcoded password fallbacks"
fi
- name: Tests temporarily skipped
run: |
echo "⚠️ Tests temporarily skipped due to PHP 8.5 compatibility issues"

View File

@@ -11,7 +11,120 @@ on:
workflow_dispatch:
jobs:
check-changes:
name: Check for Dependency Changes
runs-on: ubuntu-latest
outputs:
dependencies_changed: ${{ steps.filter.outputs.dependencies_changed }}
steps:
- name: Download CI helpers
shell: bash
env:
CI_TOKEN: ${{ secrets.CI_TOKEN }}
run: |
set -euo pipefail
REF="${{ github.sha }}"
if [ -z "$REF" ]; then
REF="${{ github.ref_name }}"
fi
if [ -z "$REF" ]; then
REF="${{ github.head_ref }}"
fi
if [ -z "$REF" ]; then
REF="main"
fi
URL="https://git.michaelschiemer.de/${{ github.repository }}/raw/${REF}/scripts/ci/clone_repo.sh"
mkdir -p /tmp/ci-tools
if [ -n "$CI_TOKEN" ]; then
curl -sfL -u "$CI_TOKEN:x-oauth-basic" "$URL" -o /tmp/ci-tools/clone_repo.sh
else
curl -sfL "$URL" -o /tmp/ci-tools/clone_repo.sh
fi
chmod +x /tmp/ci-tools/clone_repo.sh
- name: Analyse changed files
id: filter
shell: bash
run: |
set -euo pipefail
REF_NAME="${{ github.ref_name }}"
if [ -z "$REF_NAME" ]; then
REF_NAME="${{ github.head_ref }}"
fi
if [ -z "$REF_NAME" ]; then
REF_NAME="main"
fi
REPO="${{ github.repository }}"
WORKDIR="/workspace/repo"
export CI_REPOSITORY="$REPO"
export CI_TOKEN="${{ secrets.CI_TOKEN }}"
export CI_REF_NAME="$REF_NAME"
export CI_DEFAULT_BRANCH="main"
export CI_TARGET_DIR="$WORKDIR"
export CI_FETCH_DEPTH="2"
/tmp/ci-tools/clone_repo.sh
cd "$WORKDIR"
# For scheduled or manual runs, always run the scan
if [ "${{ github.event_name }}" = "schedule" ] || [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
echo "dependencies_changed=true" >> "$GITHUB_OUTPUT"
echo " Scheduled/manual run - will scan dependencies"
exit 0
fi
CHANGED_FILES=""
EVENT_BEFORE="${{ github.event.before }}"
if [ "${{ github.event_name }}" = "push" ] && [ -n "$EVENT_BEFORE" ]; then
if git rev-parse "$EVENT_BEFORE" >/dev/null 2>&1; then
CHANGED_FILES="$(git diff --name-only "$EVENT_BEFORE" HEAD || true)"
else
git fetch origin "$EVENT_BEFORE" --depth 1 || true
if git rev-parse "$EVENT_BEFORE" >/dev/null 2>&1; then
CHANGED_FILES="$(git diff --name-only "$EVENT_BEFORE" HEAD || true)"
fi
fi
fi
if [ -z "$CHANGED_FILES" ]; then
if git rev-parse HEAD^ >/dev/null 2>&1; then
CHANGED_FILES="$(git diff --name-only HEAD^ HEAD || true)"
else
git fetch origin "$REF_NAME" --depth 50 || true
if git rev-parse HEAD^ >/dev/null 2>&1; then
CHANGED_FILES="$(git diff --name-only HEAD^ HEAD || true)"
fi
fi
fi
DEPENDENCIES_CHANGED=false
if [ -n "$CHANGED_FILES" ]; then
while IFS= read -r FILE; do
[ -z "$FILE" ] && continue
if echo "$FILE" | grep -Eq "^(composer\.json|composer\.lock)$"; then
DEPENDENCIES_CHANGED=true
break
fi
done <<< "$CHANGED_FILES"
fi
echo "dependencies_changed=$DEPENDENCIES_CHANGED" >> "$GITHUB_OUTPUT"
if [ "$DEPENDENCIES_CHANGED" = "true" ]; then
echo " Dependencies changed - security scan will run"
else
echo " No dependency changes detected - skipping security scan"
fi
security-audit:
needs: check-changes
if: needs.check-changes.outputs.dependencies_changed == 'true' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
name: Composer Security Audit
runs-on: php-ci # Uses pre-built PHP 8.5 CI image with Composer pre-installed
@@ -55,6 +168,22 @@ jobs:
cd /workspace/repo
- name: Get Composer cache directory
id: composer-cache
shell: bash
run: |
echo "dir=$(composer global config cache-dir 2>/dev/null | cut -d' ' -f3 || echo "$HOME/.composer/cache")" >> $GITHUB_OUTPUT
- name: Cache Composer dependencies
uses: actions/cache@v4
with:
path: |
${{ steps.composer-cache.outputs.dir }}
vendor/
key: ${{ runner.os }}-composer-security-${{ hashFiles('**/composer.lock') }}
restore-keys: |
${{ runner.os }}-composer-security-
- name: Validate composer.json and composer.lock
run: |
cd /workspace/repo
@@ -63,13 +192,6 @@ jobs:
# Try to update lock file if needed
composer update --lock --no-interaction || echo "⚠️ Could not update lock file, but continuing..."
- name: Cache Composer packages (simple)
run: |
if [ -d "/tmp/composer-cache/vendor" ]; then
echo "📦 Restoring cached dependencies..."
cp -r /tmp/composer-cache/vendor /workspace/repo/vendor || true
fi
- name: Install dependencies
run: |
cd /workspace/repo
@@ -77,11 +199,6 @@ jobs:
# TODO: Remove --ignore-platform-req=php when dependencies are updated (estimated: 1 month)
composer install --prefer-dist --no-progress --no-dev --ignore-platform-req=php
- name: Save Composer cache
run: |
mkdir -p /tmp/composer-cache
cp -r /workspace/repo/vendor /tmp/composer-cache/vendor || true
- name: Run Composer Security Audit
id: security-audit
run: |

6
.gitignore vendored
View File

@@ -8,6 +8,8 @@ Thumbs.db
# Build / Runtime
vendor/
.env
.env.local
.env.staging
*.log
*.retry
x_ansible/.vault_pass
@@ -58,5 +60,5 @@ cookies_new.txt
playwright-report/
test-results/
.playwright/
# WireGuard client configs (generated locally)
deployment/ansible/wireguard-clients/
# WireGuard client configs (generated locally)
deployment/ansible/wireguard-clients/

View File

@@ -22,7 +22,8 @@ RUN apt-get update && apt-get install -y \
# Install Composer
COPY --from=composer:latest /usr/bin/composer /usr/bin/composer
RUN composer install \
RUN --mount=type=cache,target=/root/.composer/cache \
composer install \
--no-dev \
--no-scripts \
--no-autoloader \
@@ -44,7 +45,8 @@ WORKDIR /app
COPY package.json package-lock.json ./
# Install npm dependencies
RUN npm ci --production=false
RUN --mount=type=cache,target=/root/.npm \
npm ci --production=false
# Copy source files needed for build
COPY resources ./resources

View File

@@ -1,12 +1,15 @@
# Environment Configuration Guide
## 📁 .env File Structure (Simplified)
## 📁 .env File Structure (Base + Override Pattern)
Nach der Konsolidierung vom 27.10.2024 gibt es nur noch **2 .env Files** im Root:
Die neue Struktur verwendet ein **Base + Override Pattern** (analog zu docker-compose):
```
├── .env # Development (aktiv, gitignored)
── .env.example # Template für neue Entwickler
├── .env.example # Template für neue Entwickler (vollständige Dokumentation)
── .env.base # Gemeinsame Variablen für alle Environments (versioniert)
├── .env.local # Lokale Development-Overrides (gitignored)
├── .env.staging # Staging-spezifische Overrides (optional, gitignored)
└── .env.production # Production (generiert durch Ansible, nicht im Repo)
```
## 🏗️ Development Setup
@@ -14,21 +17,32 @@ Nach der Konsolidierung vom 27.10.2024 gibt es nur noch **2 .env Files** im Root
### Initial Setup
```bash
# 1. Copy example file
cp .env.example .env
# 1. .env.base ist bereits im Repository (gemeinsame Variablen)
# 2. Erstelle .env.local für lokale Overrides
cp .env.example .env.local
# 2. Anpassungen für lokale Entwicklung
# - DB Credentials
# - API Keys
# - Feature Flags
# 3. Passe .env.local an deine lokale Entwicklung an
# - DB Credentials (lokal)
# - API Keys (lokal)
# - Debug-Flags
```
### Active File: `.env`
### Framework lädt automatisch: `.env.base` → `.env.local` (Overrides)
- ✅ Wird von Docker Compose verwendet
- ✅ Wird von PHP Application gelesen
- ❌ NICHT committen (gitignored)
- ✅ Jeder Entwickler hat eigene Version
**Priorität:**
1. System Environment Variables (Docker ENV)
2. `.env.base` (gemeinsame Basis)
3. `.env.local` (lokale Overrides)
4. `.env.secrets` (verschlüsselte Secrets, optional)
**Wichtig:** `env_file` in Docker Compose ist nicht nötig!
- Framework lädt automatisch `.env.base``.env.local` via `EncryptedEnvLoader`
- Docker Compose `env_file` ist optional und wird nur für Container-interne Variablen benötigt
- PHP-Anwendung lädt ENV-Variablen direkt aus den Dateien
**Backward Compatibility:**
- Falls `.env.base` oder `.env.local` nicht existieren, wird `.env` geladen (Fallback)
- Migration: Bestehende `.env` Files funktionieren weiterhin
## 🚀 Production Deployment
@@ -63,54 +77,111 @@ ansible-playbook -i inventories/production/hosts.yml \
3. File nach `/home/deploy/michaelschiemer/shared/.env.production` deployen
4. Docker Compose mounted diesen File in Container
## 🔒 Security Best Practices
## 🔒 Security & Secret Management
### Docker Secrets (Production & Staging)
**Production und Staging verwenden Docker Secrets:**
1. **Ansible Vault → Docker Secrets Dateien**
- Ansible Playbook erstellt Secret-Dateien in `secrets/` Verzeichnis
- Dateien haben sichere Permissions (0600)
2. **Docker Compose Secrets**
- Secrets werden in `docker-compose.base.yml` definiert
- Environment-Variablen nutzen `*_FILE` Pattern (z.B. `DB_PASSWORD_FILE=/run/secrets/db_user_password`)
- Framework lädt automatisch via `DockerSecretsResolver`
3. **Framework Support**
- `DockerSecretsResolver` unterstützt automatisch `*_FILE` Pattern
- Kein manuelles Secret-Loading mehr nötig (wird automatisch vom Framework behandelt)
### Development
```bash
# .env niemals committen
# .env.local niemals committen
git status
# Should show: .env (untracked) ✅
# Should show: .env.local (untracked) ✅
# .env.base ist versioniert (keine Secrets!)
# Falls versehentlich staged:
git reset HEAD .env
git reset HEAD .env.local
```
### Production
- ✅ Secrets in Ansible Vault
- ✅ Ansible erstellt Docker Secrets Dateien (`secrets/*.txt`)
- ✅ Docker Compose Secrets aktiviert
- ✅ Framework lädt automatisch via `*_FILE` Pattern
- ✅ .env.production auf Server wird NICHT ins Repository committed
- ✅ Template `.env.production.j2` enthält nur Platzhalter
- ✅ Echte Werte werden zur Deploy-Zeit eingesetzt
- ✅ Template `application.env.j2` verwendet `*_FILE` Pattern
## 📝 Adding New Environment Variables
### Development
```bash
# 1. Add to .env.example with placeholder
echo "NEW_API_KEY=your_api_key_here" >> .env.example
# 1. Add to .env.base if shared across environments
echo "NEW_API_KEY=" >> .env.base
# 2. Add actual value to your local .env
echo "NEW_API_KEY=abc123..." >> .env
# 2. Add to .env.local for local development
echo "NEW_API_KEY=abc123..." >> .env.local
# 3. Update .env.example for documentation
echo "NEW_API_KEY=your_api_key_here" >> .env.example
```
### Production
**Hinweis:** Wenn die Variable nur für lokale Entwicklung ist, nur in `.env.local` hinzufügen.
### Production (mit Docker Secrets)
```bash
# 1. Add to Ansible Template (use *_FILE pattern for secrets)
# File: deployment/ansible/templates/application.env.j2
echo "# Use Docker Secrets via *_FILE pattern" >> application.env.j2
echo "NEW_API_KEY_FILE=/run/secrets/new_api_key" >> application.env.j2
# 2. Add to docker-compose.base.yml secrets section
# File: docker-compose.base.yml
# secrets:
# new_api_key:
# file: ./secrets/new_api_key.txt
# 3. Add secret to Ansible Vault
ansible-vault edit deployment/ansible/secrets/production.vault.yml
# Add: vault_new_api_key: "production_value"
# 4. Update setup-production-secrets.yml to create secret file
# File: deployment/ansible/playbooks/setup-production-secrets.yml
# Add to loop:
# - name: new_api_key
# value: "{{ vault_new_api_key }}"
# 5. Deploy
cd deployment/ansible
ansible-playbook -i inventory/production.yml \
playbooks/setup-production-secrets.yml \
--vault-password-file .vault_pass
```
### Production (ohne Docker Secrets, fallback)
Falls Docker Secrets nicht verwendet werden sollen:
```bash
# 1. Add to Ansible Template
# File: deployment/infrastructure/templates/.env.production.j2
echo "NEW_API_KEY={{ vault_new_api_key }}" >> .env.production.j2
# File: deployment/ansible/templates/application.env.j2
echo "NEW_API_KEY={{ vault_new_api_key }}" >> application.env.j2
# 2. Add secret to Ansible Vault
ansible-vault edit deployment/infrastructure/group_vars/production/vault.yml
ansible-vault edit deployment/ansible/secrets/production.vault.yml
# Add: vault_new_api_key: "production_value"
# 3. Deploy
cd deployment/infrastructure
ansible-playbook -i inventories/production/hosts.yml \
playbooks/deploy-rsync-based.yml \
--vault-password-file .vault_pass
cd deployment/ansible
ansible-playbook -i inventory/production.yml \
playbooks/deploy-update.yml
```
## 🗑️ Removed Files (Consolidation 27.10.2024)
@@ -129,35 +200,67 @@ Diese Files wurden gelöscht, da sie redundant/nicht verwendet wurden:
## ✅ Current State
### Local Development
-Einziges aktives File: `.env`
-Template: `.env.example`
-Klar und eindeutig
-Base File: `.env.base` (versioniert, gemeinsame Variablen)
-Override File: `.env.local` (gitignored, lokale Anpassungen)
-Template: `.env.example` (Dokumentation)
- ✅ Framework lädt automatisch: `.env.base``.env.local` (Overrides)
### Production
- ✅ Single Source: `/home/deploy/michaelschiemer/shared/.env.production` (auf Server)
- ✅ Verwaltet durch: Ansible Template `.env.production.j2`
- ✅ Secrets in: Ansible Vault
- ✅ Verwaltet durch: Ansible Template `application.env.j2`
- ✅ Secrets: Docker Secrets (`secrets/*.txt` Dateien)
- ✅ Framework lädt automatisch via `*_FILE` Pattern (`DockerSecretsResolver`)
- ✅ Keine Duplikate
### Staging
- ✅ Docker Compose Environment Variables
- ✅ Docker Secrets aktiviert (wie Production)
- ✅ Optional: `.env.staging` für Staging-spezifische Overrides
## 🔍 Verification
```bash
# Check local .env files
ls -la .env*
# Should show: .env, .env.example
# Should show: .env.base (versioniert), .env.local (gitignored), .env.example
# Check Ansible template exists
ls -la deployment/infrastructure/templates/.env.production.j2
ls -la deployment/ansible/templates/application.env.j2
# Should exist
# Check Docker Secrets files exist (on server)
ls -la {{ app_stack_path }}/secrets/
# Should show: db_user_password.txt, redis_password.txt, app_key.txt, etc.
# Check NO old files remain
find . -name ".env.production" -o -name ".env.*.example" | grep -v .env.example
find . -name ".env.production" -o -name ".env.*.example" | grep -v .env.example | grep -v .env.base
# Should be empty
```
## 📞 Support
Bei Fragen zum .env Setup:
- Development: Siehe `.env.example`
- Production: Siehe `deployment/infrastructure/templates/.env.production.j2`
- Secrets: Kontaktiere DevOps Team für Ansible Vault Zugriff
- Development: Siehe `.env.base` (gemeinsame Variablen) und `.env.example` (Dokumentation)
- Production: Siehe `deployment/ansible/templates/application.env.j2`
- Secrets: Docker Secrets aktiviert, verwaltet durch Ansible Vault
- Migration: Framework unterstützt Fallback auf `.env` (alte Struktur)
## 🔄 Migration von alter Struktur
**Von `.env` zu `.env.base` + `.env.local`:**
```bash
# 1. Erstelle .env.base (gemeinsame Variablen extrahieren)
# (wird automatisch vom Framework erkannt)
# 2. Erstelle .env.local (nur lokale Overrides)
cp .env .env.local
# 3. Entferne gemeinsame Variablen aus .env.local
# (nur lokale Anpassungen behalten)
# 4. Alte .env kann später entfernt werden
# (nach erfolgreicher Migration)
```
**Hinweis:** Framework lädt automatisch `.env.base` + `.env.local`. Falls diese nicht existieren, wird `.env` als Fallback geladen (Backward Compatibility).

184
Makefile
View File

@@ -6,29 +6,56 @@
PROJECT_NAME = michaelschiemer
ENV ?= dev
# Standart Docker Compose Befehle
# Docker Compose Konfiguration
COMPOSE_BASE = docker-compose.base.yml
COMPOSE_LOCAL = docker-compose.local.yml
COMPOSE_STAGING = docker-compose.staging.yml
COMPOSE_PRODUCTION = docker-compose.production.yml
COMPOSE_FILES = -f $(COMPOSE_BASE) -f $(COMPOSE_LOCAL)
up: ## Startet alle Docker-Container
docker compose up -d
# Standart Docker Compose Befehle (Lokale Entwicklung)
up: ## Startet alle Docker-Container (lokale Entwicklung)
docker compose $(COMPOSE_FILES) up -d
down: ## Stoppt alle Container
docker compose down
docker compose $(COMPOSE_FILES) down
build:
docker compose build
build: ## Baut alle Docker-Images
docker compose $(COMPOSE_FILES) build
restart: ## Neustart aller Container
./bin/restart
logs: ## Zeigt Logs aus Docker
docker compose logs -f
docker compose $(COMPOSE_FILES) logs -f
ps: ## Docker PS
docker compose ps
docker compose $(COMPOSE_FILES) ps
reload: ## Dump Autoload & Restart PHP
docker-compose exec php composer dump-autoload -o
docker-compose restart php
docker compose $(COMPOSE_FILES) exec php composer dump-autoload -o
docker compose $(COMPOSE_FILES) restart php
# Staging Environment
up-staging: ## Startet Staging-Container
docker compose -f $(COMPOSE_BASE) -f $(COMPOSE_STAGING) up -d
down-staging: ## Stoppt Staging-Container
docker compose -f $(COMPOSE_BASE) -f $(COMPOSE_STAGING) down
logs-staging: ## Zeigt Staging-Logs
docker compose -f $(COMPOSE_BASE) -f $(COMPOSE_STAGING) logs -f
# Production Environment
up-production: ## Startet Production-Container (nur auf Server)
docker compose -f $(COMPOSE_BASE) -f $(COMPOSE_PRODUCTION) up -d
down-production: ## Stoppt Production-Container (nur auf Server)
docker compose -f $(COMPOSE_BASE) -f $(COMPOSE_PRODUCTION) down
logs-production: ## Zeigt Production-Logs (nur auf Server)
docker compose -f $(COMPOSE_BASE) -f $(COMPOSE_PRODUCTION) logs -f
flush-redis: ## Clear Redis cache (FLUSHALL)
docker exec redis redis-cli FLUSHALL
@@ -48,39 +75,39 @@ deploy: ## Führt Ansible Deploy aus
test: ## Führt alle Tests mit PHP 8.4 aus
@echo "🧪 Running tests with PHP 8.4..."
docker compose --profile test run --rm php-test ./vendor/bin/pest
docker compose $(COMPOSE_FILES) --profile test run --rm php-test ./vendor/bin/pest
test-php85: ## Führt alle Tests mit PHP 8.5 aus (Development)
@echo "🧪 Running tests with PHP 8.5..."
docker exec php ./vendor/bin/pest
test-coverage: ## Führt Tests mit Coverage-Report aus (PHP 8.4)
docker compose --profile test run --rm php-test ./vendor/bin/pest --coverage
docker compose $(COMPOSE_FILES) --profile test run --rm php-test ./vendor/bin/pest --coverage
test-coverage-html: ## Generiert HTML Coverage-Report (PHP 8.4)
docker compose --profile test run --rm php-test ./vendor/bin/pest --coverage-html coverage-html
docker compose $(COMPOSE_FILES) --profile test run --rm php-test ./vendor/bin/pest --coverage-html coverage-html
@echo "📊 Coverage-Report verfügbar unter: coverage-html/index.html"
test-unit: ## Führt nur Unit-Tests aus (PHP 8.4)
docker compose --profile test run --rm php-test ./vendor/bin/pest tests/Unit/
docker compose $(COMPOSE_FILES) --profile test run --rm php-test ./vendor/bin/pest tests/Unit/
test-framework: ## Führt nur Framework-Tests aus (PHP 8.4)
docker compose --profile test run --rm php-test ./vendor/bin/pest tests/Framework/
docker compose $(COMPOSE_FILES) --profile test run --rm php-test ./vendor/bin/pest tests/Framework/
test-domain: ## Führt nur Domain-Tests aus (PHP 8.4)
docker compose --profile test run --rm php-test ./vendor/bin/pest tests/Domain/
docker compose $(COMPOSE_FILES) --profile test run --rm php-test ./vendor/bin/pest tests/Domain/
test-watch: ## Führt Tests im Watch-Modus aus (PHP 8.4)
docker compose --profile test run --rm php-test ./vendor/bin/pest --watch
docker compose $(COMPOSE_FILES) --profile test run --rm php-test ./vendor/bin/pest --watch
test-parallel: ## Führt Tests parallel aus (PHP 8.4)
docker compose --profile test run --rm php-test ./vendor/bin/pest --parallel
docker compose $(COMPOSE_FILES) --profile test run --rm php-test ./vendor/bin/pest --parallel
test-profile: ## Profiling der langsamsten Tests (PHP 8.4)
docker compose --profile test run --rm php-test ./vendor/bin/pest --profile
docker compose $(COMPOSE_FILES) --profile test run --rm php-test ./vendor/bin/pest --profile
test-filter: ## Führt spezifische Tests aus (PHP 8.4) (Usage: make test-filter FILTER="EventDispatcher")
docker compose --profile test run --rm php-test ./vendor/bin/pest --filter="$(FILTER)"
docker compose $(COMPOSE_FILES) --profile test run --rm php-test ./vendor/bin/pest --filter="$(FILTER)"
# Security Checks
security-check: ## Führt Composer Security Audit aus
@@ -130,7 +157,7 @@ console: ## Run console commands (Usage: make console ARGS="command arguments")
composer: ## Use Composer
docker compose exec php composer $(ARGS)
docker compose $(COMPOSE_FILES) exec php composer $(ARGS)
fix-perms: ## Fix permissions
sudo chown -R $(USER):$(USER) .
@@ -139,10 +166,10 @@ cs:
@$(MAKE) composer ARGS="cs"
cs-fix-file: ## Fix code style for a specific file
docker compose exec -e PHP_CS_FIXER_IGNORE_ENV=1 php ./vendor/bin/php-cs-fixer fix $(subst \,/,$(FILE))
docker compose $(COMPOSE_FILES) exec -e PHP_CS_FIXER_IGNORE_ENV=1 php ./vendor/bin/php-cs-fixer fix $(subst \,/,$(FILE))
cs-fix: ## Fix code style for all PHP files
docker compose exec -e PHP_CS_FIXER_IGNORE_ENV=1 php ./vendor/bin/php-cs-fixer fix
docker compose $(COMPOSE_FILES) exec -e PHP_CS_FIXER_IGNORE_ENV=1 php ./vendor/bin/php-cs-fixer fix
phpstan: ## Run PHPStan static analysis
@$(MAKE) composer ARGS="phpstan"
@@ -150,12 +177,38 @@ phpstan: ## Run PHPStan static analysis
phpstan-baseline: ## Generate PHPStan baseline
@$(MAKE) composer ARGS="phpstan-baseline"
ssh: ## SSH-Verbindung zum Production-Server öffnen (nutzt ~/.ssh/config 'production')
@echo "🔌 Verbinde zum Production-Server..."
ssh production
ssh-production: ## SSH-Verbindung zum Production-Server öffnen
@echo "🔌 Verbinde zum Production-Server..."
ssh production
ssh-git: ## SSH-Verbindung zum Git-Server öffnen
@echo "🔌 Verbinde zum Git-Server..."
ssh git.michaelschiemer.de
ssh-status: ## Status der autossh-Services prüfen
@echo "📊 Prüfe autossh Service-Status..."
@systemctl --user status autossh-production.service --no-pager || echo "⚠️ autossh-production.service nicht aktiv"
@echo ""
@ps aux | grep autossh | grep -v grep || echo "⚠️ Keine autossh-Prozesse gefunden"
ssh-logs: ## Logs der autossh-Services anzeigen
@echo "📋 Zeige autossh Logs..."
@journalctl --user -u autossh-production.service -n 20 --no-pager || echo "⚠️ Keine Logs verfügbar"
setup-ssh: ## SSH-Schlüssel korrekt einrichten
mkdir -p ~/.ssh
cp /mnt/c/Users/Mike/.ssh/test.michaelschiemer.de ~/.ssh/staging
chmod 600 ~/.ssh/staging
@echo "SSH-Schlüssel für Staging korrekt eingerichtet"
setup-autossh: ## Autossh für persistente SSH-Verbindungen einrichten
@echo "🔧 Richte autossh für persistente SSH-Verbindungen ein..."
@bash scripts/setup-autossh.sh both
fix-ssh-perms: ## Korrigiert SSH-Schlüsselberechtigungen (veraltet)
chmod 600 /mnt/c/Users/Mike/.ssh/test.michaelschiemer.de
@echo "SSH-Schlüsselberechtigungen korrigiert"
@@ -257,4 +310,87 @@ ssl-backup: ## Backup Let's Encrypt certificates
push-staging: ## Pusht den aktuellen Stand nach origin/staging
git push origin HEAD:staging
.PHONY: up down build restart logs ps phpinfo deploy setup clean clean-coverage status fix-ssh-perms setup-ssh test test-coverage test-coverage-html test-unit test-framework test-domain test-watch test-parallel test-profile test-filter security-check security-audit-json security-check-prod update-production restart-production deploy-production-quick status-production logs-production logs-staging logs-staging-php ssl-init ssl-init-staging ssl-test ssl-renew ssl-status ssl-backup push-staging
# ENV File Management
env-base: ## Erstellt .env.base aus .env.example (gemeinsame Variablen)
@if [ ! -f .env.example ]; then \
echo "❌ .env.example nicht gefunden"; \
exit 1; \
fi
@if [ -f .env.base ]; then \
echo "⚠️ .env.base existiert bereits. Überschreiben? (j/n)"; \
read confirm; \
if [ "$$confirm" != "j" ]; then \
echo "❌ Abgebrochen"; \
exit 1; \
fi
fi
@echo "📝 Erstelle .env.base aus .env.example..."
@cp .env.example .env.base
@echo "✅ .env.base erstellt"
@echo "💡 Bearbeite .env.base und entferne environment-spezifische Variablen"
@echo "💡 Siehe ENV_SETUP.md für Details"
env-local: ## Erstellt .env.local für lokale Development-Overrides
@if [ -f .env.local ]; then \
echo "⚠️ .env.local existiert bereits. Überschreiben? (j/n)"; \
read confirm; \
if [ "$$confirm" != "j" ]; then \
echo "❌ Abgebrochen"; \
exit 1; \
fi
fi
@echo "📝 Erstelle .env.local..."
@if [ -f .env ]; then \
cp .env .env.local; \
echo "✅ .env.local erstellt aus .env"; \
else \
echo "APP_ENV=development" > .env.local; \
echo "APP_DEBUG=true" >> .env.local; \
echo "✅ .env.local erstellt (Minimal-Version)"; \
fi
@echo "💡 Bearbeite .env.local mit deinen lokalen Overrides"
@echo "💡 Siehe ENV_SETUP.md für Details"
env-check: ## Prüft .env.base auf Secrets (sollte keine enthalten)
@echo "🔍 Prüfe .env.base auf Secrets..."
@if [ ! -f .env.base ]; then \
echo "✅ .env.base existiert nicht (optional)"; \
exit 0; \
fi
@if grep -E "(PASSWORD|KEY|SECRET|TOKEN)" .env.base | grep -v "^#" | grep -v "=" | grep -v "^$$" > /dev/null; then \
echo "⚠️ Warnung: .env.base könnte Secrets enthalten:"; \
grep -E "(PASSWORD|KEY|SECRET|TOKEN)" .env.base | grep -v "^#" | grep -v "=" || true; \
echo "💡 Secrets sollten in .env.local oder Docker Secrets sein"; \
exit 1; \
else \
echo "✅ .env.base enthält keine Secrets"; \
fi
@echo ""
@echo "🔍 Prüfe docker-compose.base.yml auf hardcodierte Passwörter..."
@if grep -E "(PASSWORD|SECRET|TOKEN).*:-[^}]*[^}]}" docker-compose.base.yml | grep -v "^#" | grep -v "FILE=" > /dev/null 2>&1; then \
echo "⚠️ Warnung: docker-compose.base.yml enthält möglicherweise hardcodierte Passwörter:"; \
grep -E "(PASSWORD|SECRET|TOKEN).*:-[^}]*[^}]}" docker-compose.base.yml | grep -v "^#" | grep -v "FILE=" || true; \
echo "💡 Passwörter müssen explizit gesetzt werden, keine Fallbacks in Base-Datei"; \
exit 1; \
else \
echo "✅ docker-compose.base.yml enthält keine hardcodierten Passwörter"; \
fi
env-validate: ## Validiert ENV-Files (Base+Override Pattern)
@echo "🔍 Validiere ENV-Files..."
@if [ -f .env.base ]; then \
echo "✅ .env.base existiert"; \
else \
echo "⚠️ .env.base nicht gefunden (optional für Migration)"; \
fi
@if [ -f .env.local ]; then \
echo "✅ .env.local existiert"; \
else \
echo "⚠️ .env.local nicht gefunden"; \
fi
@if [ -f .env ] && [ ! -f .env.base ]; then \
echo "✅ Legacy .env verwendet (Fallback)"; \
fi
@echo "💡 Framework lädt: .env.base → .env.local → System ENV"
.PHONY: up down build restart logs ps phpinfo deploy setup clean clean-coverage status fix-ssh-perms setup-ssh setup-autossh ssh ssh-production ssh-git ssh-status ssh-logs test test-coverage test-coverage-html test-unit test-framework test-domain test-watch test-parallel test-profile test-filter security-check security-audit-json security-check-prod update-production restart-production deploy-production-quick status-production logs-production logs-staging logs-staging-php ssl-init ssl-init-staging ssl-test ssl-renew ssl-status ssl-backup push-staging env-base env-local env-check env-validate

View File

@@ -17,8 +17,9 @@ make up
# Oder: Manuelle Installation
composer install
npm install
cp .env.example .env
# Bearbeiten Sie .env mit Ihren Einstellungen
# Neue Base+Override Struktur: .env.base + .env.local
# Siehe ENV_SETUP.md für Details
# Für Backward Compatibility: cp .env.example .env (wird als Fallback geladen)
```
### Production Deployment

View File

@@ -4,6 +4,33 @@
This deployment setup uses separate Docker Compose stacks for better maintainability and clear separation of concerns.
### Docker Compose Structure
The project uses a **Base + Override Pattern** to prevent configuration drift between environments:
- **`docker-compose.base.yml`** - Shared base configuration (services, networks, volumes)
- **`docker-compose.local.yml`** - Local development overrides (ports, host mounts, debug flags)
- **`docker-compose.staging.yml`** - Staging environment overrides (Traefik labels, staging volumes)
- **`docker-compose.production.yml`** - Production environment overrides (security, logging, resources)
**Usage:**
```bash
# Local development
docker compose -f docker-compose.base.yml -f docker-compose.local.yml up
# Staging
docker compose -f docker-compose.base.yml -f docker-compose.staging.yml up
# Production
docker compose -f docker-compose.base.yml -f docker-compose.production.yml up
```
**Benefits:**
- ✅ Single source of truth for shared configuration
- ✅ Environment-specific differences clearly visible
- ✅ Reduced configuration drift between environments
- ✅ Easier maintenance and updates
### Infrastructure Components
```

View File

@@ -62,11 +62,6 @@
set_fact:
server_vpn_ip: "{{ (wireguard_server_config_read.content | b64decode | regex_search('Address = ([0-9.]+)', '\\1')) | first | default('10.8.0.1') }}"
- name: Set default DNS servers if not provided
set_fact:
wireguard_dns_servers: "{{ [server_vpn_ip] }}"
when: wireguard_dns_servers | length == 0
- name: Extract WireGuard server IP octets
set_fact:
wireguard_server_ip_octets: "{{ server_vpn_ip.split('.') }}"

View File

@@ -0,0 +1,192 @@
---
- name: Diagnose Gitea Bad Gateway Issue
hosts: production
gather_facts: yes
become: no
vars:
gitea_stack_path: "{{ stacks_base_path }}/gitea"
tasks:
- name: Check if Gitea stack directory exists
stat:
path: "{{ gitea_stack_path }}"
register: gitea_stack_dir
- name: Display Gitea stack directory status
debug:
msg: "Gitea stack path: {{ gitea_stack_path }} - Exists: {{ gitea_stack_dir.stat.exists }}"
- name: Check Gitea container status
shell: |
cd {{ gitea_stack_path }}
echo "=== Gitea Container Status ==="
docker compose ps 2>&1 || echo "Could not check container status"
args:
executable: /bin/bash
register: gitea_status
ignore_errors: yes
failed_when: false
when: gitea_stack_dir.stat.exists
- name: Display Gitea container status
debug:
msg: "{{ gitea_status.stdout_lines }}"
when: gitea_stack_dir.stat.exists
- name: Check if Gitea container is running
shell: |
docker ps --filter name=gitea --format "{{ '{{' }}.Names{{ '}}' }}: {{ '{{' }}.Status{{ '}}' }}"
register: gitea_running
ignore_errors: yes
failed_when: false
- name: Display Gitea running status
debug:
msg: "{{ gitea_running.stdout_lines if gitea_running.stdout else 'Gitea container not found' }}"
- name: Check Gitea logs (last 50 lines)
shell: |
cd {{ gitea_stack_path }}
echo "=== Gitea Logs (Last 50 lines) ==="
docker compose logs --tail=50 gitea 2>&1 || echo "Could not read Gitea logs"
args:
executable: /bin/bash
register: gitea_logs
ignore_errors: yes
failed_when: false
when: gitea_stack_dir.stat.exists
- name: Display Gitea logs
debug:
msg: "{{ gitea_logs.stdout_lines }}"
when: gitea_stack_dir.stat.exists
- name: Check Gitea container health
shell: |
docker inspect gitea --format '{{ '{{' }}.State.Health.Status{{ '}}' }}' 2>&1 || echo "Could not check health"
register: gitea_health
ignore_errors: yes
failed_when: false
- name: Display Gitea health status
debug:
msg: "Gitea health: {{ gitea_health.stdout }}"
- name: Test Gitea health endpoint from container
shell: |
docker exec gitea curl -f http://localhost:3000/api/healthz 2>&1 || echo "Health check failed"
register: gitea_internal_health
ignore_errors: yes
failed_when: false
- name: Display internal health check result
debug:
msg: "{{ gitea_internal_health.stdout_lines }}"
- name: Check if Gitea is reachable from Traefik network
shell: |
docker exec traefik curl -f http://gitea:3000/api/healthz 2>&1 || echo "Could not reach Gitea from Traefik network"
register: gitea_from_traefik
ignore_errors: yes
failed_when: false
- name: Display Traefik to Gitea connectivity
debug:
msg: "{{ gitea_from_traefik.stdout_lines }}"
- name: Check Traefik logs for Gitea errors
shell: |
cd {{ stacks_base_path }}/traefik
echo "=== Traefik Logs - Gitea related (Last 30 lines) ==="
docker compose logs --tail=100 traefik 2>&1 | grep -i "gitea" | tail -30 || echo "No Gitea-related logs found"
args:
executable: /bin/bash
register: traefik_gitea_logs
ignore_errors: yes
failed_when: false
- name: Display Traefik Gitea logs
debug:
msg: "{{ traefik_gitea_logs.stdout_lines }}"
- name: Check Docker networks
shell: |
echo "=== Docker Networks ==="
docker network ls
echo ""
echo "=== Traefik Network Details ==="
docker network inspect traefik-public 2>&1 | grep -E "(Name|Subnet|Containers|gitea)" || echo "Could not inspect traefik-public network"
args:
executable: /bin/bash
register: network_info
ignore_errors: yes
failed_when: false
- name: Display network info
debug:
msg: "{{ network_info.stdout_lines }}"
- name: Check if Gitea is in traefik-public network
shell: |
docker network inspect traefik-public 2>&1 | grep -i "gitea" || echo "Gitea not found in traefik-public network"
register: gitea_in_network
ignore_errors: yes
failed_when: false
- name: Display Gitea network membership
debug:
msg: "{{ gitea_in_network.stdout_lines }}"
- name: Check Gitea container configuration
shell: |
echo "=== Gitea Container Labels ==="
docker inspect gitea --format '{{ '{{' }}range .Config.Labels{{ '}}' }}{{ '{{' }}.Key{{ '}}' }}={{ '{{' }}.Value{{ '}}' }}{{ '{{' }}\n{{ '}}' }}{{ '{{' }}end{{ '}}' }}' 2>&1 | grep -i traefik || echo "No Traefik labels found"
register: gitea_labels
ignore_errors: yes
failed_when: false
- name: Display Gitea labels
debug:
msg: "{{ gitea_labels.stdout_lines }}"
- name: Check Traefik service registration
shell: |
docker exec traefik wget -qO- http://localhost:8080/api/http/services 2>&1 | grep -i gitea || echo "Gitea service not found in Traefik API"
register: traefik_service
ignore_errors: yes
failed_when: false
- name: Display Traefik service registration
debug:
msg: "{{ traefik_service.stdout_lines }}"
- name: Test external Gitea access
shell: |
echo "=== Testing External Gitea Access ==="
curl -k -H "User-Agent: Mozilla/5.0" -s -o /dev/null -w "HTTP Status: %{http_code}\n" https://git.michaelschiemer.de/ 2>&1 || echo "Connection failed"
args:
executable: /bin/bash
register: external_test
ignore_errors: yes
failed_when: false
- name: Display external test result
debug:
msg: "{{ external_test.stdout_lines }}"
- name: Summary
debug:
msg:
- "=== DIAGNOSIS SUMMARY ==="
- "1. Check if Gitea container is running"
- "2. Check if Gitea is in traefik-public network"
- "3. Check Gitea health endpoint (port 3000)"
- "4. Check Traefik can reach Gitea"
- "5. Check Traefik logs for errors"
- ""
- "Common issues:"
- "- Container not running: Restart with 'docker compose up -d' in {{ gitea_stack_path }}"
- "- Not in network: Recreate container or add to network"
- "- Health check failing: Check Gitea logs for errors"
- "- Traefik can't reach: Check network configuration"

View File

@@ -0,0 +1,70 @@
---
- name: Check Traefik Gitea Configuration
hosts: production
gather_facts: yes
become: no
vars:
traefik_stack_path: "{{ stacks_base_path }}/traefik"
tasks:
- name: Check Traefik logs for Gitea errors
shell: |
cd {{ traefik_stack_path }}
echo "=== Traefik Logs - Gitea errors (Last 50 lines) ==="
docker compose logs --tail=100 traefik 2>&1 | grep -i "gitea\|502\|bad gateway" | tail -50 || echo "No Gitea-related errors found"
args:
executable: /bin/bash
register: traefik_errors
ignore_errors: yes
failed_when: false
- name: Display Traefik errors
debug:
msg: "{{ traefik_errors.stdout_lines }}"
- name: Check dynamic Gitea configuration on server
shell: |
cat {{ traefik_stack_path }}/dynamic/gitea.yml 2>&1 || echo "File not found"
register: gitea_dynamic_config
ignore_errors: yes
failed_when: false
- name: Display dynamic Gitea config
debug:
msg: "{{ gitea_dynamic_config.stdout_lines }}"
- name: Test if Traefik can resolve gitea hostname
shell: |
docker exec traefik getent hosts gitea 2>&1 || echo "Cannot resolve gitea hostname"
register: traefik_resolve
ignore_errors: yes
failed_when: false
- name: Display Traefik resolve result
debug:
msg: "{{ traefik_resolve.stdout_lines }}"
- name: Get Gitea container IP
shell: |
docker inspect gitea --format '{{ '{{' }}range.NetworkSettings.Networks{{ '}}' }}{{ '{{' }}if eq .NetworkID (printf "%s" (docker network inspect traefik-public --format "{{ '{{' }}.Id{{ '}}' }}")){{ '}}' }}{{ '{{' }}.IPAddress{{ '}}' }}{{ '{{' }}end{{ '}}' }}{{ '{{' }}end{{ '}}' }}' 2>&1 || echo "Could not get IP"
register: gitea_ip
ignore_errors: yes
failed_when: false
- name: Display Gitea IP
debug:
msg: "Gitea IP in traefik-public network: {{ gitea_ip.stdout }}"
- name: Test connectivity from Traefik to Gitea IP
shell: |
GITEA_IP="{{ gitea_ip.stdout | default('172.21.0.3') }}"
docker exec traefik wget -qO- --timeout=5 "http://$GITEA_IP:3000/api/healthz" 2>&1 || echo "Cannot connect to Gitea at $GITEA_IP:3000"
register: traefik_connect
ignore_errors: yes
failed_when: false
when: gitea_ip.stdout is defined and gitea_ip.stdout != ""
- name: Display connectivity result
debug:
msg: "{{ traefik_connect.stdout_lines }}"

View File

@@ -50,21 +50,34 @@
group: "{{ ansible_user }}"
mode: '0755'
- name: Check if docker-compose.yml exists in application stack
- name: Check if docker-compose.base.yml exists in application stack
stat:
path: "{{ app_stack_path }}/docker-compose.yml"
register: compose_file_exists
path: "{{ app_stack_path }}/docker-compose.base.yml"
register: compose_base_exists
when: not (application_sync_files | default(false) | bool)
- name: Fail if docker-compose.yml doesn't exist
- name: Check if docker-compose.production.yml exists in application stack
stat:
path: "{{ app_stack_path }}/docker-compose.production.yml"
register: compose_prod_exists
when: not (application_sync_files | default(false) | bool)
- name: Fail if docker-compose files don't exist
fail:
msg: |
Application Stack docker-compose.yml not found at {{ app_stack_path }}/docker-compose.yml
Application Stack docker-compose files not found at {{ app_stack_path }}
Required files:
- docker-compose.base.yml
- docker-compose.production.yml
The Application Stack must be deployed first via:
ansible-playbook -i inventory/production.yml playbooks/setup-infrastructure.yml
This will create the application stack with docker-compose.yml and .env file.
when: not compose_file_exists.stat.exists
This will create the application stack with docker-compose files and .env file.
when:
- not (application_sync_files | default(false) | bool)
- (not compose_base_exists.stat.exists or not compose_prod_exists.stat.exists)
- name: Create backup directory
file:
@@ -75,31 +88,47 @@
mode: '0755'
tasks:
- name: Verify docker-compose.yml exists
- name: Verify docker-compose files exist
stat:
path: "{{ app_stack_path }}/docker-compose.yml"
register: compose_file_check
path: "{{ app_stack_path }}/docker-compose.base.yml"
register: compose_base_check
when: not (application_sync_files | default(false) | bool)
- name: Verify docker-compose.production.yml exists
stat:
path: "{{ app_stack_path }}/docker-compose.production.yml"
register: compose_prod_check
when: not (application_sync_files | default(false) | bool)
- name: Fail if docker-compose.yml doesn't exist
- name: Fail if docker-compose files don't exist
fail:
msg: |
Application Stack docker-compose.yml not found at {{ app_stack_path }}/docker-compose.yml
Application Stack docker-compose files not found at {{ app_stack_path }}
Required files:
- docker-compose.base.yml
- docker-compose.production.yml
The Application Stack must be deployed first via:
ansible-playbook -i inventory/production.yml playbooks/setup-infrastructure.yml
This will create the application stack with docker-compose.yml and .env file.
when: not compose_file_check.stat.exists
This will create the application stack with docker-compose files and .env file.
when:
- not (application_sync_files | default(false) | bool)
- (not compose_base_check.stat.exists or not compose_prod_check.stat.exists)
- name: Backup current deployment metadata
shell: |
docker compose -f {{ app_stack_path }}/docker-compose.yml ps --format json 2>/dev/null > {{ backups_path }}/{{ deployment_timestamp | regex_replace(':', '-') }}/current_containers.json || true
docker compose -f {{ app_stack_path }}/docker-compose.yml config 2>/dev/null > {{ backups_path }}/{{ deployment_timestamp | regex_replace(':', '-') }}/docker-compose-config.yml || true
docker compose -f {{ app_stack_path }}/docker-compose.base.yml -f {{ app_stack_path }}/docker-compose.production.yml ps --format json 2>/dev/null > {{ backups_path }}/{{ deployment_timestamp | regex_replace(':', '-') }}/current_containers.json || true
docker compose -f {{ app_stack_path }}/docker-compose.base.yml -f {{ app_stack_path }}/docker-compose.production.yml config 2>/dev/null > {{ backups_path }}/{{ deployment_timestamp | regex_replace(':', '-') }}/docker-compose-config.yml || true
args:
executable: /bin/bash
changed_when: false
ignore_errors: yes
when: compose_file_check.stat.exists
when:
- not (application_sync_files | default(false) | bool)
- compose_base_exists.stat.exists | default(false)
- compose_prod_exists.stat.exists | default(false)
- name: Login to Docker registry (if credentials provided)
community.docker.docker_login:
@@ -128,9 +157,19 @@
msg: "Failed to pull image {{ app_image }}:{{ image_tag }}"
when: image_pull.failed
- name: Update docker-compose.yml with new image tag (all services)
# Sync files first if application_sync_files=true (before updating docker-compose.production.yml)
- name: Sync application stack files
import_role:
name: application
vars:
application_sync_files: "{{ application_sync_files | default(false) }}"
application_compose_recreate: "never" # Don't recreate yet, just sync files
application_remove_orphans: false
when: application_sync_files | default(false) | bool
- name: Update docker-compose.production.yml with new image tag (all services)
replace:
path: "{{ app_stack_path }}/docker-compose.yml"
path: "{{ app_stack_path }}/docker-compose.production.yml"
# Match both localhost:5000 and registry.michaelschiemer.de (or any registry URL)
regexp: '^(\s+image:\s+)(localhost:5000|registry\.michaelschiemer\.de|{{ docker_registry }})/{{ app_name }}:.*$'
replace: '\1{{ app_image }}:{{ image_tag }}'
@@ -142,13 +181,13 @@
import_role:
name: application
vars:
application_sync_files: false
application_sync_files: false # Already synced above, don't sync again
application_compose_recreate: "always"
application_remove_orphans: true
- name: Get deployed image information
shell: |
docker compose -f {{ app_stack_path }}/docker-compose.yml config | grep -E "^\s+image:" | head -1 | awk '{print $2}' || echo "unknown"
docker compose -f {{ app_stack_path }}/docker-compose.base.yml -f {{ app_stack_path }}/docker-compose.production.yml config | grep -E "^\s+image:" | head -1 | awk '{print $2}' || echo "unknown"
args:
executable: /bin/bash
register: deployed_image

View File

@@ -0,0 +1,143 @@
---
- name: Fix Gitea Traefik Configuration - Remove Dynamic Config and Use Labels
hosts: production
gather_facts: yes
become: no
vars:
traefik_stack_path: "{{ stacks_base_path }}/traefik"
gitea_stack_path: "{{ stacks_base_path }}/gitea"
tasks:
- name: Backup dynamic Gitea configuration
shell: |
cd {{ traefik_stack_path }}/dynamic
if [ -f gitea.yml ]; then
cp gitea.yml gitea.yml.backup-$(date +%Y%m%d-%H%M%S)
echo "Backed up to gitea.yml.backup-$(date +%Y%m%d-%H%M%S)"
else
echo "File not found, nothing to backup"
fi
args:
executable: /bin/bash
register: backup_result
ignore_errors: yes
failed_when: false
- name: Display backup result
debug:
msg: "{{ backup_result.stdout_lines }}"
- name: Remove dynamic Gitea configuration
file:
path: "{{ traefik_stack_path }}/dynamic/gitea.yml"
state: absent
register: remove_config
- name: Restart Traefik to reload configuration
community.docker.docker_compose_v2:
project_src: "{{ traefik_stack_path }}"
state: present
pull: never
recreate: always
services:
- traefik
register: traefik_restart
when: remove_config.changed
- name: Wait for Traefik to be ready
wait_for:
port: 443
host: localhost
timeout: 30
delegate_to: localhost
when: traefik_restart.changed
ignore_errors: yes
- name: Check if Gitea docker-compose.yml already has Traefik labels
shell: |
grep -q "traefik.enable=true" {{ gitea_stack_path }}/docker-compose.yml && echo "Labels already present" || echo "Labels missing"
register: labels_check
ignore_errors: yes
failed_when: false
- name: Copy docker-compose.yml from local to ensure labels are present
copy:
src: "{{ playbook_dir }}/../../stacks/gitea/docker-compose.yml"
dest: "{{ gitea_stack_path }}/docker-compose.yml"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0644'
register: labels_added
when: "'Labels missing' in labels_check.stdout"
- name: Recreate Gitea container with labels
community.docker.docker_compose_v2:
project_src: "{{ gitea_stack_path }}"
state: present
pull: never
recreate: always
remove_orphans: no
register: gitea_recreate
when: labels_added.changed
- name: Wait for Gitea to be healthy
shell: |
for i in {1..30}; do
if docker exec gitea curl -f http://localhost:3000/api/healthz >/dev/null 2>&1; then
echo "Gitea is healthy"
exit 0
fi
echo "Waiting for Gitea... ($i/30)"
sleep 2
done
echo "Health check timeout"
exit 1
args:
executable: /bin/bash
register: health_wait
ignore_errors: yes
failed_when: false
when: gitea_recreate.changed
- name: Display health wait result
debug:
msg: "{{ health_wait.stdout_lines }}"
when: gitea_recreate.changed
- name: Check Traefik service registration
shell: |
sleep 5 # Give Traefik time to discover
docker exec traefik wget -qO- http://localhost:8080/api/http/services 2>&1 | grep -i gitea || echo "Service not found (may take a few seconds)"
register: traefik_service
ignore_errors: yes
failed_when: false
- name: Display Traefik service registration
debug:
msg: "{{ traefik_service.stdout_lines }}"
- name: Test external Gitea access
shell: |
sleep 3 # Give Traefik time to update routing
curl -k -H "User-Agent: Mozilla/5.0" -s -o /dev/null -w "HTTP Status: %{http_code}\n" https://git.michaelschiemer.de/ 2>&1 || echo "Connection failed"
args:
executable: /bin/bash
register: external_test
ignore_errors: yes
failed_when: false
- name: Display external test result
debug:
msg: "{{ external_test.stdout_lines }}"
- name: Summary
debug:
msg:
- "=== FIX SUMMARY ==="
- "Dynamic config removed: {{ 'Yes' if remove_config.changed else 'Already removed' }}"
- "Labels added to docker-compose.yml: {{ 'Yes' if labels_added.changed else 'Already present' }}"
- "Gitea container recreated: {{ 'Yes' if gitea_recreate.changed else 'No' }}"
- ""
- "Gitea should now be accessible via https://git.michaelschiemer.de"
- "If issue persists, check Traefik logs for errors"

View File

@@ -0,0 +1,139 @@
---
- name: Fix Gitea Traefik Labels
hosts: production
gather_facts: yes
become: no
vars:
gitea_stack_path: "{{ stacks_base_path }}/gitea"
tasks:
- name: Check current Gitea container status
shell: |
cd {{ gitea_stack_path }}
docker compose ps gitea
args:
executable: /bin/bash
register: gitea_status_before
ignore_errors: yes
failed_when: false
- name: Display current status
debug:
msg: "{{ gitea_status_before.stdout_lines }}"
- name: Check current Traefik labels
shell: |
docker inspect gitea --format '{{ '{{' }}range .Config.Labels{{ '}}' }}{{ '{{' }}.Key{{ '}}' }}={{ '{{' }}.Value{{ '}}' }}{{ '{{' }}\n{{ '}}' }}{{ '{{' }}end{{ '}}' }}' 2>&1 | grep -i traefik || echo "No Traefik labels found"
register: current_labels
ignore_errors: yes
failed_when: false
- name: Display current labels
debug:
msg: "{{ current_labels.stdout_lines }}"
- name: Recreate Gitea container with Traefik labels
community.docker.docker_compose_v2:
project_src: "{{ gitea_stack_path }}"
state: present
pull: never
recreate: always
remove_orphans: no
register: gitea_recreate
- name: Wait for Gitea to be ready
wait_for:
port: 3000
host: localhost
timeout: 60
delegate_to: localhost
when: gitea_recreate.changed
ignore_errors: yes
- name: Wait for Gitea health check
shell: |
for i in {1..30}; do
if docker exec gitea curl -f http://localhost:3000/api/healthz >/dev/null 2>&1; then
echo "Gitea is healthy"
exit 0
fi
echo "Waiting for Gitea to be healthy... ($i/30)"
sleep 2
done
echo "Gitea health check timeout"
exit 1
args:
executable: /bin/bash
register: health_wait
ignore_errors: yes
failed_when: false
when: gitea_recreate.changed
- name: Display health wait result
debug:
msg: "{{ health_wait.stdout_lines }}"
when: gitea_recreate.changed
- name: Check new Gitea container status
shell: |
cd {{ gitea_stack_path }}
docker compose ps gitea
args:
executable: /bin/bash
register: gitea_status_after
ignore_errors: yes
failed_when: false
- name: Display new status
debug:
msg: "{{ gitea_status_after.stdout_lines }}"
- name: Check new Traefik labels
shell: |
docker inspect gitea --format '{{ '{{' }}range .Config.Labels{{ '}}' }}{{ '{{' }}.Key{{ '}}' }}={{ '{{' }}.Value{{ '}}' }}{{ '{{' }}\n{{ '}}' }}{{ '{{' }}end{{ '}}' }}' 2>&1 | grep -i traefik || echo "No Traefik labels found"
register: new_labels
ignore_errors: yes
failed_when: false
- name: Display new labels
debug:
msg: "{{ new_labels.stdout_lines }}"
- name: Check Traefik service registration
shell: |
docker exec traefik wget -qO- http://localhost:8080/api/http/services 2>&1 | grep -i gitea || echo "Gitea service not found (may take a few seconds to register)"
register: traefik_service
ignore_errors: yes
failed_when: false
- name: Display Traefik service registration
debug:
msg: "{{ traefik_service.stdout_lines }}"
- name: Test external Gitea access
shell: |
echo "Testing external access..."
sleep 5 # Give Traefik time to update
curl -k -H "User-Agent: Mozilla/5.0" -s -o /dev/null -w "HTTP Status: %{http_code}\n" https://git.michaelschiemer.de/ 2>&1 || echo "Connection failed"
args:
executable: /bin/bash
register: external_test
ignore_errors: yes
failed_when: false
- name: Display external test result
debug:
msg: "{{ external_test.stdout_lines }}"
- name: Summary
debug:
msg:
- "=== FIX SUMMARY ==="
- "Container recreated: {{ 'Yes' if gitea_recreate.changed else 'No' }}"
- "Traefik labels: {{ 'Fixed' if 'traefik' in new_labels.stdout|lower else 'Still missing' }}"
- ""
- "If the issue persists:"
- "1. Check Traefik logs: cd {{ stacks_base_path }}/traefik && docker compose logs traefik"
- "2. Verify Traefik can reach Gitea: docker exec traefik ping -c 2 gitea"
- "3. Check Gitea logs for errors: cd {{ gitea_stack_path }} && docker compose logs gitea"

View File

@@ -10,6 +10,7 @@
wireguard_config_file: "{{ wireguard_config_path }}/{{ wireguard_interface }}.conf"
wireguard_client_configs_path: "/etc/wireguard/clients"
wireguard_local_client_configs_dir: "{{ playbook_dir }}/../wireguard-clients"
wireguard_dns_servers: []
tasks:
- name: Validate client name
@@ -80,18 +81,19 @@
- name: Extract server IP from config
set_fact:
server_vpn_ip: "{{ (wireguard_server_config_read.content | b64decode | regex_search('Address = ([0-9.]+)')) | default(['10.8.0.1']) | first }}"
server_vpn_ip: "{{ (wireguard_server_config_read.content | b64decode | regex_search('Address = ([0-9.]+)', '\\\\1')) | first | default('10.8.0.1') }}"
failed_when: false
- name: Set default DNS servers
set_fact:
wireguard_dns_servers: "{{ [server_vpn_ip] }}"
- name: Extract WireGuard server IP octets
set_fact:
wireguard_server_ip_octets: "{{ server_vpn_ip.split('.') }}"
wireguard_server_ip_octets: "{{ (server_vpn_ip | default('')).split('.') }}"
when: client_ip == ""
- name: Fail if server VPN IP is invalid
fail:
msg: "Server VPN IP '{{ server_vpn_ip }}' ist ungültig bitte wg0.conf prüfen."
when: client_ip == "" and (wireguard_server_ip_octets | length) < 4
- name: Gather existing client addresses
set_fact:
existing_client_ips: "{{ (wireguard_server_config_read.content | b64decode | regex_findall('AllowedIPs = ([0-9A-Za-z.]+)/32', '\\\\1')) }}"
@@ -109,7 +111,7 @@
wireguard_server_ip_octets[2],
next_octet_candidate
] | join('.') }}"
when: client_ip == ""
when: client_ip == "" and (wireguard_server_ip_octets | length) >= 4
- name: Generate NEW client private key
command: "wg genkey"

View File

@@ -35,45 +35,37 @@
file: "{{ vault_file }}"
no_log: yes
- name: Ensure secrets directory exists
- name: Ensure secrets directory exists for Docker Compose secrets
file:
path: "{{ secrets_path }}"
path: "{{ app_stack_path }}/secrets"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0700'
- name: Create .env.production file
template:
src: "{{ playbook_dir }}/../templates/.env.production.j2"
dest: "{{ secrets_path }}/.env.production"
- name: Create Docker Compose secret files from vault
copy:
content: "{{ item.value }}"
dest: "{{ app_stack_path }}/secrets/{{ item.name }}.txt"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0600'
no_log: yes
- name: Create Docker secrets from vault (disabled for compose-only deployment)
docker_secret:
name: "{{ item.name }}"
data: "{{ item.value }}"
state: present
loop:
- name: db_password
- name: db_user_password
value: "{{ vault_db_password }}"
- name: redis_password
value: "{{ vault_redis_password }}"
- name: app_key
value: "{{ vault_app_key }}"
- name: jwt_secret
value: "{{ vault_jwt_secret }}"
- name: mail_password
value: "{{ vault_mail_password }}"
- name: vault_encryption_key
value: "{{ vault_encryption_key | default(vault_app_key) }}"
- name: git_token
value: "{{ vault_git_token | default('') }}"
no_log: yes
when: false
- name: Set secure permissions on secrets directory
file:
path: "{{ secrets_path }}"
path: "{{ app_stack_path }}/secrets"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"

View File

@@ -1,6 +1,8 @@
---
# Source path for application stack files on the control node
application_stack_src: "{{ role_path }}/../../stacks/application"
# Use playbook_dir as base, then go to ../stacks/application
# This assumes playbooks are in deployment/ansible/playbooks
application_stack_src: "{{ playbook_dir | default(role_path + '/..') }}/../stacks/application"
# Destination path on the target host (defaults to configured app_stack_path)
application_stack_dest: "{{ app_stack_path | default(stacks_base_path + '/application') }}"

View File

@@ -10,7 +10,7 @@
- name: Wait for application container to report Up
shell: |
docker compose -f {{ application_stack_dest }}/docker-compose.yml ps app | grep -Eiq "Up|running"
docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.production.yml ps php | grep -Eiq "Up|running"
register: application_app_running
changed_when: false
until: application_app_running.rc == 0
@@ -20,7 +20,7 @@
- name: Ensure app container is running before migrations
shell: |
docker compose -f {{ application_stack_dest }}/docker-compose.yml ps app | grep -Eiq "Up|running"
docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.production.yml ps php | grep -Eiq "Up|running"
args:
executable: /bin/bash
register: application_app_container_running
@@ -30,7 +30,7 @@
- name: Run database migrations
shell: |
docker compose -f {{ application_stack_dest }}/docker-compose.yml exec -T app {{ application_migration_command }}
docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.production.yml exec -T php {{ application_migration_command }}
args:
executable: /bin/bash
register: application_migration_result
@@ -43,7 +43,7 @@
- application_app_container_running.rc == 0
- name: Collect application container status
shell: docker compose -f {{ application_stack_dest }}/docker-compose.yml ps
shell: docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.production.yml ps
register: application_ps
changed_when: false
ignore_errors: yes

View File

@@ -17,6 +17,7 @@
file: "{{ application_vault_file }}"
when: application_vault_stat.stat.exists
no_log: yes
ignore_errors: yes
delegate_to: localhost
become: no
@@ -72,21 +73,57 @@
application_encryption_key: "{{ encryption_key | default(vault_encryption_key | default('')) }}"
no_log: yes
- name: Check if application docker-compose source exists locally
- name: Check if application docker-compose.base.yml source exists locally
stat:
path: "{{ application_stack_src }}/docker-compose.base.yml"
delegate_to: localhost
register: application_compose_base_src
become: no
- name: Check if application docker-compose.production.yml source exists locally
stat:
path: "{{ application_stack_src }}/../../../docker-compose.production.yml"
delegate_to: localhost
register: application_compose_prod_src
become: no
- name: Copy application docker-compose.base.yml to target host
copy:
src: "{{ application_stack_src }}/docker-compose.base.yml"
dest: "{{ application_stack_dest }}/docker-compose.base.yml"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0644'
when: application_compose_base_src.stat.exists
- name: Copy application docker-compose.production.yml to target host
copy:
src: "{{ application_stack_src }}/../../../docker-compose.production.yml"
dest: "{{ application_stack_dest }}/docker-compose.production.yml"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0644'
when: application_compose_prod_src.stat.exists
- name: Check if legacy docker-compose.yml exists (fallback)
stat:
path: "{{ application_stack_src }}/docker-compose.yml"
delegate_to: localhost
register: application_compose_src
become: no
when: not (application_compose_base_src.stat.exists | default(false))
- name: Copy application docker-compose to target host
- name: Copy application docker-compose.yml to target host (fallback for legacy)
copy:
src: "{{ application_stack_src }}/docker-compose.yml"
dest: "{{ application_stack_dest }}/docker-compose.yml"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0644'
when: application_compose_src.stat.exists
when:
- application_compose_src is defined
- application_compose_src.stat.exists | default(false)
- not (application_compose_base_src.stat.exists | default(false))
- name: Check if nginx configuration exists locally
stat:

View File

@@ -11,7 +11,9 @@ APP_DOMAIN={{ app_domain }}
APP_ENV={{ app_env | default('production') }}
APP_DEBUG={{ app_debug | default('false') }}
APP_NAME={{ app_display_name | default(app_name | default('Framework') | replace('-', ' ') | title) }}
APP_KEY={{ app_key }}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
# APP_KEY is loaded from /run/secrets/app_key via APP_KEY_FILE
APP_KEY_FILE=/run/secrets/app_key
APP_TIMEZONE={{ app_timezone | default(timezone | default('Europe/Berlin')) }}
APP_LOCALE={{ app_locale | default('de') }}
APP_URL=https://{{ app_domain }}
@@ -25,18 +27,22 @@ DB_HOST={{ db_host | default('postgres') }}
DB_PORT={{ db_port | default('5432') }}
DB_DATABASE={{ db_name | default(db_name_default) }}
DB_USERNAME={{ db_user | default(db_user_default) }}
DB_PASSWORD={{ db_password }}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
# DB_PASSWORD is loaded from /run/secrets/db_user_password via DB_PASSWORD_FILE
DB_PASSWORD_FILE=/run/secrets/db_user_password
DB_CHARSET={{ db_charset | default('utf8') }}
# Legacy variables (kept for backward compatibility)
DB_NAME={{ db_name | default(db_name_default) }}
DB_USER={{ db_user | default(db_user_default) }}
DB_PASS={{ db_password }}
# DB_PASS is loaded from Docker Secret via DB_PASSWORD_FILE
# Redis Configuration
# Redis runs in this stack
REDIS_HOST={{ redis_host | default('redis') }}
REDIS_PORT={{ redis_port | default('6379') }}
REDIS_PASSWORD={{ redis_password }}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
# REDIS_PASSWORD is loaded from /run/secrets/redis_password via REDIS_PASSWORD_FILE
REDIS_PASSWORD_FILE=/run/secrets/redis_password
# Security Configuration
SECURITY_ALLOWED_HOSTS={{ security_allowed_hosts | default('localhost,' ~ app_domain ~ ',www.' ~ app_domain) }}
@@ -59,11 +65,17 @@ QUEUE_WORKER_TRIES={{ queue_worker_tries | default('3') }}
QUEUE_WORKER_TIMEOUT={{ queue_worker_timeout | default('60') }}
# Vault / Encryption
VAULT_ENCRYPTION_KEY={{ encryption_key }}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
# VAULT_ENCRYPTION_KEY is loaded from /run/secrets/vault_encryption_key via VAULT_ENCRYPTION_KEY_FILE
VAULT_ENCRYPTION_KEY_FILE=/run/secrets/vault_encryption_key
# APP_KEY is loaded from /run/secrets/app_key via APP_KEY_FILE
APP_KEY_FILE=/run/secrets/app_key
# Git Repository Configuration (optional - if set, container will clone/pull code on start)
GIT_REPOSITORY_URL={{ git_repository_url | default('') }}
GIT_BRANCH={{ git_branch | default('main') }}
GIT_TOKEN={{ git_token | default('') }}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
# GIT_TOKEN is loaded from /run/secrets/git_token via GIT_TOKEN_FILE
GIT_TOKEN_FILE=/run/secrets/git_token
GIT_USERNAME={{ git_username | default('') }}
GIT_PASSWORD={{ git_password | default('') }}

View File

@@ -8,8 +8,10 @@ PrivateKey = {{ client_private_key.stdout }}
# Client IP address in VPN network
Address = {{ client_ip }}/24
# DNS server (VPN internal resolver)
{% if wireguard_dns_servers | length > 0 %}
# DNS servers provided via Ansible (optional)
DNS = {{ wireguard_dns_servers | join(', ') }}
{% endif %}
[Peer]
# Server public key

View File

@@ -0,0 +1,213 @@
# Base Docker Compose Configuration
# This file contains shared service definitions, networks, and volumes.
# Use with environment-specific override files:
# - docker-compose.local.yml (local development)
# - docker-compose.staging.yml (staging environment)
# - docker-compose.production.yml (production environment)
#
# Usage:
# Local: docker-compose -f docker-compose.base.yml -f docker-compose.local.yml up
# Staging: docker-compose -f docker-compose.base.yml -f docker-compose.staging.yml up
# Production: docker-compose -f docker-compose.base.yml -f docker-compose.production.yml up
services:
web:
build:
context: docker/nginx
dockerfile: Dockerfile
healthcheck:
test: ["CMD", "nc", "-z", "127.0.0.1", "443"]
interval: 30s
timeout: 10s
retries: 3
depends_on:
php:
condition: service_started
networks:
- frontend
- backend
php:
build:
context: .
dockerfile: docker/php/Dockerfile
args:
- ENV=${APP_ENV:-dev}
- COMPOSER_INSTALL_FLAGS=${COMPOSER_INSTALL_FLAGS:---no-scripts --no-autoloader}
healthcheck:
test: [ "CMD", "php", "-v" ]
interval: 30s
timeout: 10s
retries: 3
networks:
- backend
- cache
volumes:
# Shared Volume für Composer-Cache über Container-Neustarts hinweg
- composer-cache:/root/.composer/cache
# Docker-Volumes für Performance (keine Host-Sync nötig)
- storage-cache:/var/www/html/storage/cache:rw
- storage-queue:/var/www/html/storage/queue:rw
- storage-discovery:/var/www/html/storage/discovery:rw
- var-data:/var/www/html/var:rw
php-test:
build:
context: .
dockerfile: docker/php/Dockerfile.test
user: "1000:1000"
profiles:
- test
volumes:
- composer-cache:/home/appuser/.composer/cache
- storage-cache:/var/www/html/storage/cache:rw
- storage-queue:/var/www/html/storage/queue:rw
- storage-discovery:/var/www/html/storage/discovery:rw
- var-data:/var/www/html/var:rw
environment:
APP_ENV: testing
APP_DEBUG: true
DB_HOST: db
REDIS_HOST: redis
networks:
- backend
- cache
entrypoint: []
command: ["php", "-v"]
db:
image: postgres:16-alpine
environment:
POSTGRES_DB: ${DB_DATABASE:-michaelschiemer}
POSTGRES_USER: ${DB_USERNAME:-postgres}
# SECURITY: POSTGRES_PASSWORD must be set explicitly (no hardcoded fallback)
# Set DB_PASSWORD in .env.local for local development
# Use Docker Secrets in production/staging via DB_PASSWORD_FILE
POSTGRES_PASSWORD: ${DB_PASSWORD}
# Performance & Connection Settings
POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
PGDATA: /var/lib/postgresql/data/pgdata
volumes:
- db_data:/var/lib/postgresql/data
- "${DB_CONFIG_PATH:-./docker/postgres/postgresql.conf}:/etc/postgresql/postgresql.conf:ro"
- "${DB_INIT_PATH:-./docker/postgres/init}:/docker-entrypoint-initdb.d:ro"
command:
- "postgres"
- "-c"
- "config_file=/etc/postgresql/postgresql.conf"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USERNAME:-postgres} -d ${DB_DATABASE:-michaelschiemer}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
networks:
- backend
redis:
image: redis:7-alpine
volumes:
- "${REDIS_CONFIG_PATH:-./docker/redis/redis.conf}:/usr/local/etc/redis/redis.conf:ro"
- redis_data:/data
command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
networks:
- cache
queue-worker:
build:
context: .
dockerfile: docker/worker/Dockerfile
entrypoint: "" # Override any entrypoint
command: ["php", "/var/www/html/worker.php"] # Direct command execution
depends_on:
php:
condition: service_healthy
redis:
condition: service_healthy
db:
condition: service_healthy
volumes:
# Use same storage volumes as PHP container for consistency
- storage-cache:/var/www/html/storage/cache:rw
- storage-queue:/var/www/html/storage/queue:rw
- storage-discovery:/var/www/html/storage/discovery:rw
- var-data:/var/www/html/var:rw
networks:
- backend
- cache
# Graceful shutdown timeout
stop_grace_period: 30s
minio:
image: minio/minio:latest
environment:
- TZ=Europe/Berlin
# SECURITY: MINIO credentials must be set explicitly (no hardcoded fallback)
# Set MINIO_ROOT_USER and MINIO_ROOT_PASSWORD in .env.local for local development
# Use Docker Secrets in production/staging for production deployments
- MINIO_ROOT_USER=${MINIO_ROOT_USER}
- MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}
command: server /data --console-address ":9001"
volumes:
- minio_data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
networks:
- backend
networks:
frontend:
driver: bridge
backend:
driver: bridge
cache:
driver: bridge
volumes:
redis_data:
composer-cache:
storage-cache: # Cache-Verzeichnis (Performance-kritisch)
storage-queue: # Queue-Verzeichnis (Performance-kritisch)
storage-discovery: # Discovery-Cache (Framework-intern)
var-data:
db_data:
project-data:
worker-logs:
worker-queue:
worker-storage: # Complete separate storage for worker with correct permissions
minio_data: # MinIO object storage data
# Docker Secrets Configuration
# Secrets are defined here but activated in environment-specific override files
secrets:
db_root_password:
file: ./secrets/db_root_password.txt
external: false
db_user_password:
file: ./secrets/db_user_password.txt
external: false
redis_password:
file: ./secrets/redis_password.txt
external: false
app_key:
file: ./secrets/app_key.txt
external: false
vault_encryption_key:
file: ./secrets/vault_encryption_key.txt
external: false
git_token:
file: ./secrets/git_token.txt
external: false

View File

@@ -25,11 +25,16 @@ services:
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE}
- DB_USERNAME=${DB_USERNAME}
- DB_PASSWORD=${DB_PASSWORD}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- DB_PASSWORD_FILE=/run/secrets/db_user_password
# Redis
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=${REDIS_PASSWORD}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- REDIS_PASSWORD_FILE=/run/secrets/redis_password
secrets:
- db_user_password
- redis_password
# Cache
- CACHE_DRIVER=redis
- CACHE_PREFIX=${CACHE_PREFIX:-app}
@@ -181,22 +186,24 @@ services:
- app-internal
environment:
- TZ=Europe/Berlin
secrets:
- redis_password
command: >
redis-server
--requirepass ${REDIS_PASSWORD}
sh -c "redis-server
--requirepass $$(cat /run/secrets/redis_password)
--maxmemory 512mb
--maxmemory-policy allkeys-lru
--save 900 1
--save 300 10
--save 60 10000
--appendonly yes
--appendfsync everysec
--appendfsync everysec"
volumes:
- redis-data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
healthcheck:
test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
test: ["CMD", "sh", "-c", "redis-cli --no-auth-warning -a $$(cat /run/secrets/redis_password) ping"]
interval: 30s
timeout: 10s
retries: 3
@@ -218,11 +225,16 @@ services:
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE}
- DB_USERNAME=${DB_USERNAME}
- DB_PASSWORD=${DB_PASSWORD}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- DB_PASSWORD_FILE=/run/secrets/db_user_password
# Redis
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=${REDIS_PASSWORD}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- REDIS_PASSWORD_FILE=/run/secrets/redis_password
secrets:
- db_user_password
- redis_password
# Queue
- QUEUE_DRIVER=redis
- QUEUE_CONNECTION=default
@@ -234,6 +246,9 @@ services:
- app-logs:/var/www/html/storage/logs
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
secrets:
- db_user_password
- redis_password
command: php console.php queue:work --queue=default --timeout=${QUEUE_WORKER_TIMEOUT:-60}
healthcheck:
test: ["CMD-SHELL", "php -r 'exit(0);' && test -f /var/www/html/console.php || exit 1"]
@@ -263,11 +278,16 @@ services:
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE}
- DB_USERNAME=${DB_USERNAME}
- DB_PASSWORD=${DB_PASSWORD}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- DB_PASSWORD_FILE=/run/secrets/db_user_password
# Redis
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=${REDIS_PASSWORD}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- REDIS_PASSWORD_FILE=/run/secrets/redis_password
secrets:
- db_user_password
- redis_password
volumes:
- app-storage:/var/www/html/storage
- app-logs:/var/www/html/storage/logs
@@ -300,6 +320,12 @@ volumes:
name: redis-data
external: true
secrets:
db_user_password:
file: ./secrets/db_user_password.txt
redis_password:
file: ./secrets/redis_password.txt
networks:
traefik-public:
external: true

View File

@@ -0,0 +1,48 @@
# Semaphore CI Stack - Environment Configuration
# Copy this file to .env and adjust values as needed
# ============================================
# MySQL Database Configuration
# ============================================
MYSQL_ROOT_PASSWORD=semaphore_root
MYSQL_DATABASE=semaphore
MYSQL_USER=semaphore
MYSQL_PASSWORD=semaphore
# ============================================
# Semaphore Configuration
# ============================================
# Port binding (default: 3000)
# Only accessible via localhost (127.0.0.1)
SEMAPHORE_PORT=3000
# Admin User Configuration
SEMAPHORE_ADMIN=admin
SEMAPHORE_ADMIN_NAME=Administrator
SEMAPHORE_ADMIN_EMAIL=admin@localhost
SEMAPHORE_ADMIN_PASSWORD=admin
# Playbook Storage Path (inside container)
SEMAPHORE_PLAYBOOK_PATH=/tmp/semaphore
# Access Key Encryption
# Generate with: head -c32 /dev/urandom | base64
# IMPORTANT: Change this in production!
SEMAPHORE_ACCESS_KEY_ENCRYPTION=change-me-in-production
# ============================================
# Optional: LDAP Configuration
# ============================================
# SEMAPHORE_LDAP_ENABLED=false
# SEMAPHORE_LDAP_HOST=ldap.example.com
# SEMAPHORE_LDAP_PORT=389
# SEMAPHORE_LDAP_DN=cn=admin,dc=example,dc=com
# SEMAPHORE_LDAP_PASSWORD=ldap_password
# SEMAPHORE_LDAP_BASE_DN=dc=example,dc=com
# SEMAPHORE_LDAP_USER_FILTER=(uid=%s)
# ============================================
# Optional: Webhook Configuration
# ============================================
# SEMAPHORE_WEBHOOK_URL=http://localhost:8080/webhook

View File

@@ -0,0 +1,556 @@
# Semaphore CI Stack - Lokale Entwicklung
## Übersicht
Selbst-gehostete Semaphore CI/CD-Plattform für lokale Entwicklung, die es ermöglicht, CI/CD-Pipelines und Ansible-Playbooks lokal zu testen und auszuführen, ohne Abhängigkeit von externen CI-Services.
**Features**:
- **Selbst-gehostet**: Läuft vollständig lokal auf dem Entwicklungsrechner
- **Isoliert**: Keine externen Zugriffe, nur localhost (127.0.0.1)
- **MySQL-Backend**: Persistente Datenbank für Projekte, Tasks und Templates
- **Web-UI**: Intuitive Benutzeroberfläche für Pipeline-Management
- **Ansible-Integration**: Native Unterstützung für Ansible-Playbooks
- **Docker-basiert**: Einfaches Setup und Wartung
**Einsatzzweck**:
- Lokales Testen von CI/CD-Pipelines
- Entwicklung und Test von Ansible-Playbooks
- Experimentieren mit Deployment-Workflows
- Keine Abhängigkeit von externen CI-Services
## Services
- **mysql** - MySQL 8.0 Datenbank für Semaphore-Daten
- **semaphore** - Semaphore CI/CD Web-UI und API
## Voraussetzungen
- Docker und Docker Compose installiert
- Port 3001 auf localhost frei verfügbar (3000 wird von Gitea verwendet)
- Ausreichend Speicherplatz für Docker Volumes (~500MB initial)
## Verzeichnisstruktur
```
semaphore/
├── docker-compose.yml # Service-Definitionen
├── env.example # Environment-Variablen Template
├── .env # Environment-Konfiguration (aus env.example erstellen)
└── README.md # Diese Datei
```
## Quick Start
### 1. Environment-Datei erstellen
```bash
cd deployment/stacks/semaphore
cp env.example .env
```
### 2. Konfiguration anpassen (Optional)
Bearbeite `.env` und passe die Werte an:
```bash
nano .env
```
**Wichtig**: Generiere einen sicheren Encryption Key:
```bash
# Linux/WSL
head -c32 /dev/urandom | base64
# Windows PowerShell
-join ((48..57) + (65..90) + (97..122) | Get-Random -Count 32 | % {[char]$_}) | ConvertTo-Base64
```
Aktualisiere `SEMAPHORE_ACCESS_KEY_ENCRYPTION` in der `.env`-Datei.
### 3. Stack starten
```bash
docker compose up -d
```
### 4. Semaphore Web-UI öffnen
Öffne im Browser: http://localhost:3001
**Standard-Login**:
- **Username**: `admin` (oder Wert aus `SEMAPHORE_ADMIN`)
- **Password**: `admin` (oder Wert aus `SEMAPHORE_ADMIN_PASSWORD`)
### 5. Erste Schritte in Semaphore
1. **Projekt erstellen**: Klicke auf "New Project" und erstelle ein neues Projekt
2. **Inventory anlegen**: Erstelle ein Inventory mit lokalen Hosts oder Docker-Containern
3. **Template erstellen**: Erstelle ein Template mit einem Ansible-Playbook
4. **Task ausführen**: Starte einen Task und beobachte die Ausführung
## Konfiguration
### Environment-Variablen (.env)
#### MySQL-Datenbank
```env
MYSQL_ROOT_PASSWORD=semaphore_root
MYSQL_DATABASE=semaphore
MYSQL_USER=semaphore
MYSQL_PASSWORD=semaphore
```
#### Semaphore-Konfiguration
```env
# Port-Binding (Standard: 3001)
SEMAPHORE_PORT=3001
# Admin-Benutzer
SEMAPHORE_ADMIN=admin
SEMAPHORE_ADMIN_NAME=Administrator
SEMAPHORE_ADMIN_EMAIL=admin@localhost
SEMAPHORE_ADMIN_PASSWORD=admin
# Playbook-Pfad (im Container)
SEMAPHORE_PLAYBOOK_PATH=/tmp/semaphore
# Encryption Key (WICHTIG: Für Produktion ändern!)
SEMAPHORE_ACCESS_KEY_ENCRYPTION=change-me-in-production
```
#### Optionale Konfiguration
**LDAP-Integration** (Standard: deaktiviert):
```env
SEMAPHORE_LDAP_ENABLED=true
SEMAPHORE_LDAP_HOST=ldap.example.com
SEMAPHORE_LDAP_PORT=389
SEMAPHORE_LDAP_DN=cn=admin,dc=example,dc=com
SEMAPHORE_LDAP_PASSWORD=ldap_password
SEMAPHORE_LDAP_BASE_DN=dc=example,dc=com
SEMAPHORE_LDAP_USER_FILTER=(uid=%s)
```
**Webhook-Integration**:
```env
SEMAPHORE_WEBHOOK_URL=http://localhost:8080/webhook
```
## Verwendung
### Stack starten
```bash
# Services im Hintergrund starten
docker compose up -d
# Logs anzeigen
docker compose logs -f
# Nur Semaphore-Logs
docker compose logs -f semaphore
# Nur MySQL-Logs
docker compose logs -f mysql
```
### Stack stoppen
```bash
docker compose down
```
### Stack neu starten
```bash
docker compose restart
```
### Status prüfen
```bash
# Container-Status anzeigen
docker compose ps
# Health-Check-Status
docker compose ps --format "table {{.Name}}\t{{.Status}}"
# Semaphore-Health-Check manuell
docker compose exec semaphore wget --no-verbose --spider http://localhost:3000/api/health
```
### Datenbank-Backup
```bash
# MySQL-Dump erstellen
docker compose exec mysql mysqldump -u semaphore -psemaphore semaphore > semaphore-backup-$(date +%Y%m%d).sql
# Backup wiederherstellen
docker compose exec -T mysql mysql -u semaphore -psemaphore semaphore < semaphore-backup-YYYYMMDD.sql
```
### Daten löschen und neu starten
```bash
# ⚠️ WARNUNG: Löscht alle Daten!
docker compose down -v
docker compose up -d
```
## Erste Schritte mit Semaphore
### 1. Projekt erstellen
1. Öffne http://localhost:3001 im Browser
2. Melde dich mit Admin-Credentials an
3. Klicke auf "New Project"
4. Gib einen Projektnamen ein (z.B. "My Project")
5. Klicke auf "Create"
### 2. Inventory anlegen
Ein Inventory definiert die Hosts, auf denen Playbooks ausgeführt werden sollen.
**Option A: Lokaler Host**
1. Gehe zu Projekt → Inventories → New Inventory
2. Name: "Local Hosts"
3. Hinzufügen von Host:
- Name: `localhost`
- Address: `127.0.0.1`
- SSH Username: `your-username`
- SSH Key: Füge deinen privaten SSH-Key hinzu
**Option B: Docker-Container**
1. Erstelle ein Inventory mit Docker-Hosts
2. Für Docker-in-Docker Support benötigst du zusätzliche Konfiguration
### 3. Template erstellen
Templates definieren welche Playbooks ausgeführt werden sollen.
1. Gehe zu Projekt → Templates → New Template
2. Template-Name: "Hello World"
3. Inventory: Wähle dein Inventory
4. Playbook: Erstelle ein einfaches Playbook:
```yaml
---
- hosts: all
gather_facts: no
tasks:
- name: Print hello world
debug:
msg: "Hello from Semaphore CI!"
```
5. Speichere das Template
### 4. Task ausführen
1. Gehe zu Templates
2. Klicke auf dein Template
3. Klicke auf "Run"
4. Beobachte die Ausführung in Echtzeit
## Integration mit bestehenden Stacks
### Verwendung mit lokaler Docker-Registry
Semaphore kann Docker-Images aus der lokalen Registry verwenden:
**In Ansible-Playbooks**:
```yaml
- name: Pull image from local registry
docker_image:
name: registry.michaelschiemer.de/framework:latest
source: pull
register: image_result
```
**Voraussetzung**: Der Semaphore-Container muss Zugriff auf den Docker-Socket oder die Registry haben.
### Verwendung mit bestehenden Ansible-Playbooks
1. Mounte deine Playbooks als Volume:
```yaml
volumes:
- /path/to/your/playbooks:/tmp/semaphore/playbooks:ro
```
2. Oder kopiere Playbooks in den Container:
```bash
docker compose exec semaphore mkdir -p /tmp/semaphore/my-playbook
docker cp my-playbook.yml semaphore:/tmp/semaphore/my-playbook/playbook.yml
```
3. Verweise im Template auf den Playbook-Pfad
## Troubleshooting
### Port-Konflikt (Port 3000 vs 3001)
**Problem**: Port 3000 ist standardmäßig von Gitea belegt, daher verwendet Semaphore Port 3001.
**Lösung**: Wenn du einen anderen Port verwenden möchtest, setze `SEMAPHORE_PORT` in der `.env` Datei:
```env
SEMAPHORE_PORT=8080 # Oder ein anderer freier Port
```
**Wichtig**: Der interne Container-Port bleibt immer 3000 - nur der externe Host-Port ändert sich.
### Semaphore startet nicht
**Prüfe Logs**:
```bash
docker compose logs semaphore
```
**Häufige Ursachen**:
- MySQL ist noch nicht bereit (warte auf Health-Check)
- Port 3001 ist bereits belegt: `netstat -tuln | grep 3001` (oder auf Windows: `netstat -ano | findstr :3001`)
- Falsche Datenbank-Credentials
**Lösung**:
```bash
# Prüfe MySQL-Status
docker compose ps mysql
# Prüfe Semaphore-Logs für DB-Verbindungsfehler
docker compose logs semaphore | grep -i database
# Restart wenn nötig
docker compose restart semaphore
```
### MySQL startet nicht
**Prüfe MySQL-Logs**:
```bash
docker compose logs mysql
```
**Häufige Ursachen**:
- Volume-Permissions-Probleme
- Port-Konflikte (unwahrscheinlich, da kein Port-Mapping)
**Lösung**:
```bash
# Prüfe Volume
docker volume inspect semaphore-mysql-data
# Cleanup und Neu-Start (⚠️ Datenverlust!)
docker compose down -v
docker compose up -d
```
### Login funktioniert nicht
**Standard-Credentials**:
- Username: `admin` (oder `SEMAPHORE_ADMIN` Wert)
- Password: `admin` (oder `SEMAPHORE_ADMIN_PASSWORD` Wert)
**Admin-Passwort zurücksetzen**:
1. Stoppe Semaphore: `docker compose stop semaphore`
2. Setze `SEMAPHORE_ADMIN_PASSWORD` in `.env` auf neues Passwort
3. Starte Semaphore: `docker compose up -d`
### Playbooks werden nicht gefunden
**Prüfe Playbook-Pfad**:
```bash
docker compose exec semaphore ls -la /tmp/semaphore
```
**Lösung**:
- Stelle sicher, dass `SEMAPHORE_PLAYBOOK_PATH` korrekt gesetzt ist
- Prüfe, ob Playbooks im richtigen Pfad liegen
- Stelle sicher, dass Datei-Berechtigungen korrekt sind
### Health-Check schlägt fehl
**Prüfe Health-Check**:
```bash
docker compose exec semaphore wget --no-verbose --spider http://localhost:3000/api/health
```
**Lösung**:
- Warte auf vollständigen Start (kann 1-2 Minuten dauern)
- Prüfe Logs: `docker compose logs semaphore`
- Restart wenn nötig: `docker compose restart semaphore`
## Sicherheit
### Lokale Entwicklung (Aktuell)
- ✅ Nur localhost-Zugriff (127.0.0.1:3000)
- ✅ Isoliertes Netzwerk (kein externer Zugriff)
- ✅ Keine Traefik-Integration
- ⚠️ Standard-Passwörter (nur für lokale Entwicklung)
### Für Produktion
Wenn du Semaphore später für Produktion nutzen willst:
1. **Starke Passwörter**: Ändere alle Passwörter in `.env`
2. **Encryption Key**: Generiere einen sicheren Key:
```bash
head -c32 /dev/urandom | base64
```
3. **Traefik-Integration**: Füge Traefik-Labels für HTTPS hinzu
4. **LDAP/SSO**: Konfiguriere externe Authentifizierung
5. **Backup-Strategie**: Regelmäßige MySQL-Backups einrichten
6. **Resource Limits**: Füge Memory/CPU-Limits hinzu
## Wartung
### Regelmäßige Aufgaben
**Wöchentlich**:
- Logs auf Fehler prüfen: `docker compose logs --tail=100`
- Disk-Space prüfen: `docker system df`
- Backup erstellen (wenn wichtige Daten vorhanden)
**Monatlich**:
- Images aktualisieren: `docker compose pull && docker compose up -d`
- Alte Tasks in Semaphore aufräumen (über Web-UI)
### Updates
```bash
# Aktuelle Images herunterladen
docker compose pull
# Mit neuen Images neu starten
docker compose up -d
# Logs prüfen
docker compose logs -f
```
### Daten bereinigen
```bash
# Alte Docker-Images löschen
docker image prune -a
# Alte Volumes prüfen
docker volume ls
# ⚠️ Vorsicht: Löscht alle Semaphore-Daten!
docker compose down -v
```
## Backup und Wiederherstellung
### Backup erstellen
```bash
# MySQL-Dump
docker compose exec mysql mysqldump \
-u semaphore -psemaphore semaphore \
> semaphore-backup-$(date +%Y%m%d-%H%M%S).sql
# Volume-Backup (komplett)
docker run --rm \
-v semaphore-mysql-data:/data \
-v $(pwd):/backup \
alpine tar czf /backup/semaphore-mysql-backup-$(date +%Y%m%d).tar.gz /data
```
### Wiederherstellung
```bash
# MySQL-Dump wiederherstellen
docker compose exec -T mysql mysql \
-u semaphore -psemaphore semaphore \
< semaphore-backup-YYYYMMDD.sql
# Volume wiederherstellen (⚠️ stoppt Container)
docker compose down
docker run --rm \
-v semaphore-mysql-data:/data \
-v $(pwd):/backup \
alpine sh -c "cd /data && tar xzf /backup/semaphore-mysql-backup-YYYYMMDD.tar.gz"
docker compose up -d
```
## Performance-Optimierung
### MySQL-Optimierung
Für bessere Performance kannst du MySQL-Konfiguration anpassen:
1. Erstelle `mysql/conf.d/my.cnf`:
```ini
[mysqld]
innodb_buffer_pool_size = 256M
max_connections = 100
```
2. Mounte in `docker-compose.yml`:
```yaml
volumes:
- ./mysql/conf.d:/etc/mysql/conf.d:ro
```
### Resource Limits
Füge Limits in `docker-compose.yml` hinzu:
```yaml
deploy:
resources:
limits:
memory: 1G
cpus: '0.5'
```
## Unterstützung
### Dokumentation
- **Semaphore CI Docs**: https://docs.semaphoreui.com/
- **Semaphore GitHub**: https://github.com/semaphoreui/semaphore
### Logs
```bash
# Alle Logs
docker compose logs -f
# Semaphore-Logs
docker compose logs -f semaphore
# MySQL-Logs
docker compose logs -f mysql
# Letzte 100 Zeilen
docker compose logs --tail=100
```
### Health-Checks
```bash
# Container-Status
docker compose ps
# Semaphore-Health
curl http://localhost:3001/api/health
# MySQL-Health
docker compose exec mysql mysqladmin ping -h localhost -u root -psemaphore_root
```
---
**Setup-Status**: ✅ Bereit für lokale Entwicklung
**Nächste Schritte**:
1. `cp env.example .env` ausführen
2. `docker compose up -d` starten
3. http://localhost:3001 öffnen
4. Mit Admin-Credentials anmelden
5. Erstes Projekt und Template erstellen

View File

@@ -0,0 +1,87 @@
services:
# MySQL Database for Semaphore
mysql:
image: mysql:8.0
container_name: semaphore-mysql
restart: unless-stopped
networks:
- semaphore-internal
environment:
- TZ=Europe/Berlin
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD:-semaphore_root}
- MYSQL_DATABASE=${MYSQL_DATABASE:-semaphore}
- MYSQL_USER=${MYSQL_USER:-semaphore}
- MYSQL_PASSWORD=${MYSQL_PASSWORD:-semaphore}
volumes:
- semaphore-mysql-data:/var/lib/mysql
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-p${MYSQL_ROOT_PASSWORD:-semaphore_root}"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
command: >
--default-authentication-plugin=mysql_native_password
--character-set-server=utf8mb4
--collation-server=utf8mb4_unicode_ci
# Semaphore CI/CD Platform
semaphore:
image: semaphoreui/semaphore:latest
container_name: semaphore
restart: unless-stopped
depends_on:
mysql:
condition: service_healthy
networks:
- semaphore-internal
ports:
# Only bind to localhost, not external interfaces
# Default port 3001 to avoid conflict with Gitea (port 3000)
- "127.0.0.1:${SEMAPHORE_PORT:-3001}:3000"
environment:
- TZ=Europe/Berlin
# Database Configuration
- SEMAPHORE_DB_DIALECT=mysql
- SEMAPHORE_DB_HOST=mysql
- SEMAPHORE_DB_PORT=3306
- SEMAPHORE_DB=${MYSQL_DATABASE:-semaphore}
- SEMAPHORE_DB_USER=${MYSQL_USER:-semaphore}
- SEMAPHORE_DB_PASS=${MYSQL_PASSWORD:-semaphore}
# Admin Configuration
- SEMAPHORE_ADMIN=${SEMAPHORE_ADMIN:-admin}
- SEMAPHORE_ADMIN_NAME=${SEMAPHORE_ADMIN_NAME:-Administrator}
- SEMAPHORE_ADMIN_EMAIL=${SEMAPHORE_ADMIN_EMAIL:-admin@localhost}
- SEMAPHORE_ADMIN_PASSWORD=${SEMAPHORE_ADMIN_PASSWORD:-admin}
# Playbook Path
- SEMAPHORE_PLAYBOOK_PATH=${SEMAPHORE_PLAYBOOK_PATH:-/tmp/semaphore}
# Encryption Key (generate with: head -c32 /dev/urandom | base64)
- SEMAPHORE_ACCESS_KEY_ENCRYPTION=${SEMAPHORE_ACCESS_KEY_ENCRYPTION:-change-me-in-production}
# Optional: LDAP Configuration (disabled by default)
- SEMAPHORE_LDAP_ENABLED=${SEMAPHORE_LDAP_ENABLED:-false}
# Optional: Webhook Configuration
- SEMAPHORE_WEBHOOK_URL=${SEMAPHORE_WEBHOOK_URL:-}
volumes:
- semaphore-data:/etc/semaphore
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/api/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
volumes:
semaphore-mysql-data:
name: semaphore-mysql-data
semaphore-data:
name: semaphore-data
networks:
semaphore-internal:
name: semaphore-internal
driver: bridge

View File

@@ -0,0 +1,51 @@
# Semaphore CI Stack - Environment Configuration
# Copy this file to .env and adjust values as needed
# Note: Rename this file to .env.example if you prefer the standard naming
# ============================================
# MySQL Database Configuration
# ============================================
MYSQL_ROOT_PASSWORD=semaphore_root
MYSQL_DATABASE=semaphore
MYSQL_USER=semaphore
MYSQL_PASSWORD=semaphore
# ============================================
# Semaphore Configuration
# ============================================
# Port binding (default: 3001)
# Only accessible via localhost (127.0.0.1)
# Note: Changed from 3000 to avoid conflict with Gitea
SEMAPHORE_PORT=3001
# Admin User Configuration
SEMAPHORE_ADMIN=admin
SEMAPHORE_ADMIN_NAME=Administrator
SEMAPHORE_ADMIN_EMAIL=admin@localhost
SEMAPHORE_ADMIN_PASSWORD=admin
# Playbook Storage Path (inside container)
SEMAPHORE_PLAYBOOK_PATH=/tmp/semaphore
# Access Key Encryption
# Generate with: head -c32 /dev/urandom | base64
# IMPORTANT: Change this in production!
SEMAPHORE_ACCESS_KEY_ENCRYPTION=change-me-in-production
# ============================================
# Optional: LDAP Configuration
# ============================================
# SEMAPHORE_LDAP_ENABLED=false
# SEMAPHORE_LDAP_HOST=ldap.example.com
# SEMAPHORE_LDAP_PORT=389
# SEMAPHORE_LDAP_DN=cn=admin,dc=example,dc=com
# SEMAPHORE_LDAP_PASSWORD=ldap_password
# SEMAPHORE_LDAP_BASE_DN=dc=example,dc=com
# SEMAPHORE_LDAP_USER_FILTER=(uid=%s)
# ============================================
# Optional: Webhook Configuration
# ============================================
# SEMAPHORE_WEBHOOK_URL=http://localhost:8080/webhook

225
docker-compose.base.yml Normal file
View File

@@ -0,0 +1,225 @@
# Base Docker Compose Configuration
# This file contains shared service definitions, networks, and volumes.
# Use with environment-specific override files:
# - docker-compose.local.yml (local development)
# - docker-compose.staging.yml (staging environment)
# - docker-compose.production.yml (production environment)
#
# Usage:
# Local: docker-compose -f docker-compose.base.yml -f docker-compose.local.yml up
# Staging: docker-compose -f docker-compose.base.yml -f docker-compose.staging.yml up
# Production: docker-compose -f docker-compose.base.yml -f docker-compose.production.yml up
services:
web:
build:
context: docker/nginx
dockerfile: Dockerfile
healthcheck:
test: ["CMD", "nc", "-z", "127.0.0.1", "443"]
interval: 30s
timeout: 10s
retries: 3
depends_on:
php:
condition: service_started
networks:
- frontend
- backend
php:
build:
context: .
dockerfile: docker/php/Dockerfile
args:
- ENV=${APP_ENV:-dev}
- COMPOSER_INSTALL_FLAGS=${COMPOSER_INSTALL_FLAGS:---no-scripts --no-autoloader}
healthcheck:
test: [ "CMD", "php", "-v" ]
interval: 30s
timeout: 10s
retries: 3
networks:
- backend
- cache
volumes:
# Shared Volume für Composer-Cache über Container-Neustarts hinweg
- composer-cache:/root/.composer/cache
# Persistent volumes for queue and logs
- storage-queue:/var/www/html/storage/queue:rw
- var-data:/var/www/html/var/logs:rw
tmpfs:
# tmpfs for cache and runtime directories (RAM-based, faster I/O)
- /var/www/html/storage/cache
- /var/www/html/storage/discovery
- /var/www/html/var/cache
- /tmp
php-test:
build:
context: .
dockerfile: docker/php/Dockerfile.test
user: "1000:1000"
profiles:
- test
volumes:
- composer-cache:/home/appuser/.composer/cache
# Persistent volumes for queue and logs
- storage-queue:/var/www/html/storage/queue:rw
- var-data:/var/www/html/var/logs:rw
tmpfs:
# tmpfs for cache and runtime directories (RAM-based, faster I/O)
- /var/www/html/storage/cache
- /var/www/html/storage/discovery
- /var/www/html/var/cache
- /tmp
environment:
APP_ENV: testing
APP_DEBUG: true
DB_HOST: db
REDIS_HOST: redis
networks:
- backend
- cache
entrypoint: []
command: ["php", "-v"]
db:
image: postgres:16-alpine
environment:
POSTGRES_DB: ${DB_DATABASE:-michaelschiemer}
POSTGRES_USER: ${DB_USERNAME:-postgres}
# SECURITY: POSTGRES_PASSWORD must be set explicitly (no hardcoded fallback)
# Set DB_PASSWORD in .env.local for local development
# Use Docker Secrets in production/staging via DB_PASSWORD_FILE
POSTGRES_PASSWORD: ${DB_PASSWORD}
# Performance & Connection Settings
POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
PGDATA: /var/lib/postgresql/data/pgdata
volumes:
- db_data:/var/lib/postgresql/data
- "${DB_CONFIG_PATH:-./docker/postgres/postgresql.conf}:/etc/postgresql/postgresql.conf:ro"
- "${DB_INIT_PATH:-./docker/postgres/init}:/docker-entrypoint-initdb.d:ro"
command:
- "postgres"
- "-c"
- "config_file=/etc/postgresql/postgresql.conf"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USERNAME:-postgres} -d ${DB_DATABASE:-michaelschiemer}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
networks:
- backend
redis:
image: redis:7-alpine
volumes:
- "${REDIS_CONFIG_PATH:-./docker/redis/redis.conf}:/usr/local/etc/redis/redis.conf:ro"
- redis_data:/data
command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
networks:
- cache
queue-worker:
build:
context: .
dockerfile: docker/worker/Dockerfile
entrypoint: "" # Override any entrypoint
command: ["php", "/var/www/html/worker.php"] # Direct command execution
depends_on:
php:
condition: service_healthy
redis:
condition: service_healthy
db:
condition: service_healthy
volumes:
# Use same storage volumes as PHP container for consistency
# Persistent volumes for queue and logs
- storage-queue:/var/www/html/storage/queue:rw
- var-data:/var/www/html/var/logs:rw
tmpfs:
# tmpfs for cache and runtime directories (RAM-based, faster I/O)
- /var/www/html/storage/cache
- /var/www/html/storage/discovery
- /var/www/html/var/cache
- /tmp
networks:
- backend
- cache
# Graceful shutdown timeout
stop_grace_period: 30s
minio:
image: minio/minio:latest
environment:
- TZ=Europe/Berlin
# SECURITY: MINIO credentials must be set explicitly (no hardcoded fallback)
# Set MINIO_ROOT_USER and MINIO_ROOT_PASSWORD in .env.local for local development
# Use Docker Secrets in production/staging for production deployments
- MINIO_ROOT_USER=${MINIO_ROOT_USER}
- MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}
command: server /data --console-address ":9001"
volumes:
- minio_data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
networks:
- backend
networks:
frontend:
driver: bridge
backend:
driver: bridge
cache:
driver: bridge
volumes:
redis_data:
composer-cache:
storage-queue: # Queue-Verzeichnis (Performance-kritisch, persistent)
var-data: # Runtime logs (persistent)
db_data:
project-data:
worker-logs:
worker-queue:
worker-storage: # Complete separate storage for worker with correct permissions
minio_data: # MinIO object storage data
# Docker Secrets Configuration
# Secrets are defined here but activated in environment-specific override files
secrets:
db_root_password:
file: ./secrets/db_root_password.txt
external: false
db_user_password:
file: ./secrets/db_user_password.txt
external: false
redis_password:
file: ./secrets/redis_password.txt
external: false
app_key:
file: ./secrets/app_key.txt
external: false
vault_encryption_key:
file: ./secrets/vault_encryption_key.txt
external: false
git_token:
file: ./secrets/git_token.txt
external: false

158
docker-compose.local.yml Normal file
View File

@@ -0,0 +1,158 @@
# Local Development Override
# Usage: docker-compose -f docker-compose.base.yml -f docker-compose.local.yml up
#
# This file overrides base configuration with local development settings:
# - Development ports (8888:80, 8443:443, 5433:5432)
# - Host-mounted volumes for live code editing
# - Debug flags enabled (APP_DEBUG, Xdebug)
# - Development-friendly restart policies
services:
web:
container_name: web
ports:
- "8888:80"
- "8443:443"
environment:
- APP_ENV=${APP_ENV:-development}
volumes:
- ./:/var/www/html:${VOLUME_MODE:-cached}
- ./ssl:/var/www/ssl:ro
restart: ${RESTART_POLICY:-unless-stopped}
# NOTE: env_file not needed - Framework automatically loads .env.base → .env.local
# Environment variables are loaded by EncryptedEnvLoader in the PHP application
logging:
driver: "${LOG_DRIVER:-local}"
options:
max-size: "${LOG_MAX_SIZE:-5m}"
max-file: "${LOG_MAX_FILE:-2}"
healthcheck:
start_period: ${HEALTHCHECK_START_PERIOD:-10s}
deploy:
resources:
limits:
memory: ${WEB_MEMORY_LIMIT:-256M}
cpus: ${WEB_CPU_LIMIT:-0.5}
reservations:
memory: ${WEB_MEMORY_RESERVATION:-128M}
cpus: ${WEB_CPU_RESERVATION:-0.25}
php:
container_name: php
user: "${PHP_USER:-1000:1000}"
volumes:
# Host-Mounts für direkten Zugriff (Development-friendly)
- ./:/var/www/html:${VOLUME_MODE:-cached}
- ./storage/logs:/var/www/html/storage/logs:rw
- ./storage/uploads:/var/www/html/storage/uploads:rw
- ./storage/analytics:/var/www/html/storage/analytics:rw
environment:
PHP_IDE_CONFIG: "${PHP_IDE_CONFIG:-serverName=docker}"
APP_ENV: ${APP_ENV:-development}
APP_DEBUG: ${APP_DEBUG:-true}
XDEBUG_MODE: ${XDEBUG_MODE:-debug}
restart: ${RESTART_POLICY:-unless-stopped}
# NOTE: env_file not needed - Framework automatically loads .env.base → .env.local
# Environment variables are loaded by EncryptedEnvLoader in the PHP application
logging:
driver: "${LOG_DRIVER:-local}"
options:
max-size: "${LOG_MAX_SIZE:-5m}"
max-file: "${LOG_MAX_FILE:-2}"
deploy:
resources:
limits:
memory: ${PHP_MEMORY_LIMIT:-512M}
cpus: ${PHP_CPU_LIMIT:-1.0}
reservations:
memory: ${PHP_MEMORY_RESERVATION:-256M}
cpus: ${PHP_CPU_RESERVATION:-0.5}
php-test:
volumes:
- ./:/var/www/html:${VOLUME_MODE:-cached}
# NOTE: env_file not needed - Framework automatically loads .env.base → .env.local
db:
container_name: db
ports:
- "${DB_EXTERNAL_PORT:-5433}:5432"
restart: ${RESTART_POLICY:-unless-stopped}
logging:
driver: "${LOG_DRIVER:-local}"
options:
max-size: "${LOG_MAX_SIZE:-5m}"
max-file: "${LOG_MAX_FILE:-2}"
deploy:
resources:
limits:
memory: ${DB_MEMORY_LIMIT:-1G}
cpus: ${DB_CPU_LIMIT:-1.0}
reservations:
memory: ${DB_MEMORY_RESERVATION:-512M}
cpus: ${DB_CPU_RESERVATION:-0.5}
redis:
container_name: redis
restart: ${RESTART_POLICY:-unless-stopped}
logging:
driver: "${LOG_DRIVER:-local}"
options:
max-size: "${LOG_MAX_SIZE:-5m}"
max-file: "${LOG_MAX_FILE:-2}"
# NOTE: env_file not needed - Framework automatically loads .env.base → .env.local
deploy:
resources:
limits:
memory: ${REDIS_MEMORY_LIMIT:-256M}
cpus: ${REDIS_CPU_LIMIT:-0.5}
reservations:
memory: ${REDIS_MEMORY_RESERVATION:-128M}
cpus: ${REDIS_CPU_RESERVATION:-0.25}
queue-worker:
container_name: queue-worker
user: "1000:1000" # Same user ID as PHP container
volumes:
- ./:/var/www/html:cached
- ./storage/logs:/var/www/html/storage/logs:rw
environment:
- APP_ENV=${APP_ENV:-development}
- WORKER_DEBUG=${WORKER_DEBUG:-false}
- WORKER_SLEEP_TIME=${WORKER_SLEEP_TIME:-100000}
- WORKER_MAX_JOBS=${WORKER_MAX_JOBS:-1000}
restart: unless-stopped
# NOTE: env_file not needed - Framework automatically loads .env.base → .env.local
deploy:
resources:
limits:
memory: 1G
reservations:
memory: 512M
minio:
container_name: minio
ports:
- "${MINIO_API_PORT:-9000}:9000"
- "${MINIO_CONSOLE_PORT:-9001}:9001"
restart: ${RESTART_POLICY:-unless-stopped}
logging:
driver: "${LOG_DRIVER:-local}"
options:
max-size: "${LOG_MAX_SIZE:-5m}"
max-file: "${LOG_MAX_FILE:-2}"
deploy:
resources:
limits:
memory: ${MINIO_MEMORY_LIMIT:-512M}
cpus: ${MINIO_CPU_LIMIT:-0.5}
reservations:
memory: ${MINIO_MEMORY_RESERVATION:-256M}
cpus: ${MINIO_CPU_RESERVATION:-0.25}
networks:
backend:
internal: ${NETWORK_BACKEND_INTERNAL:-false}
cache:
internal: ${NETWORK_CACHE_INTERNAL:-false}

View File

@@ -1,14 +1,14 @@
# Production-specific Docker Compose overrides
# Usage: docker-compose -f docker-compose.yml -f docker-compose.production.yml --env-file .env.production up -d
# Production Environment Override
# Usage: docker-compose -f docker-compose.base.yml -f docker-compose.production.yml --env-file .env.production up -d
#
# This file overrides base configuration with production-specific settings:
# - Stricter resource limits
# - Production restart policies (always)
# - JSON logging with proper rotation
# - No host mounts (security)
# - Internal networks (security)
# - Production PostgreSQL configuration
# - Certbot for SSL certificates
# - Production port mappings (80, 443 for Let's Encrypt)
services:
web:
@@ -31,6 +31,16 @@ services:
- APP_ENV=production
- APP_DEBUG=false
# Security hardening
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
cap_add:
- CHOWN
- DAC_OVERRIDE
- NET_BIND_SERVICE # Required for binding to ports 80/443
# Stricter health checks for production
healthcheck:
test: ["CMD", "curl", "-f", "https://localhost/health"]
@@ -64,11 +74,6 @@ services:
certbot:
condition: service_started
# Networks must be explicitly defined to avoid override issues
networks:
- frontend
- backend
php:
# Production restart policy
restart: always
@@ -77,6 +82,15 @@ services:
# The entrypoint script will use gosu to switch to appuser after setup
user: "root"
# Security hardening (applied after gosu switches to appuser)
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
cap_add:
- CHOWN
- DAC_OVERRIDE
# Override build args for production
build:
args:
@@ -90,6 +104,16 @@ services:
- PHP_MAX_EXECUTION_TIME=30
# Disable Xdebug in production
- XDEBUG_MODE=off
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- DB_PASSWORD_FILE=/run/secrets/db_user_password
- REDIS_PASSWORD_FILE=/run/secrets/redis_password
- APP_KEY_FILE=/run/secrets/app_key
- VAULT_ENCRYPTION_KEY_FILE=/run/secrets/vault_encryption_key
secrets:
- db_user_password
- redis_password
- app_key
- vault_encryption_key
# Stricter health checks
healthcheck:
@@ -127,15 +151,16 @@ services:
# Mount .env file from shared directory (production environment variables)
- /home/deploy/michaelschiemer/shared/.env.production:/var/www/html/.env:ro
# Networks must be explicitly defined to avoid override issues
networks:
- backend
- cache
db:
# Production restart policy
restart: always
# Use Docker Secrets for database password
environment:
POSTGRES_PASSWORD_FILE: /run/secrets/db_user_password
secrets:
- db_user_password
# Use production PostgreSQL configuration
volumes:
- db_data:/var/lib/postgresql/data
@@ -169,14 +194,51 @@ services:
compress: "true"
labels: "service,environment"
# Networks must be explicitly defined to avoid override issues
networks:
- backend
redis:
# Production restart policy
restart: always
# Use Docker Secrets for Redis password
environment:
REDIS_PASSWORD_FILE: /run/secrets/redis_password
secrets:
- redis_password
# Security hardening
security_opt:
- no-new-privileges:true
# Don't set user here - we need root to read Docker Secrets in entrypoint
# Redis will run as root, but this is acceptable for this use case
cap_drop:
- ALL
# Use entrypoint script to inject password from Docker Secret into config
# Note: Script runs as root to read Docker Secrets, then starts Redis
entrypoint: ["/bin/sh", "-c"]
command:
- |
# Read password from Docker Secret (as root)
REDIS_PASSWORD=$$(cat /run/secrets/redis_password 2>/dev/null || echo '')
# Start Redis with all settings as command line arguments (no config file to avoid conflicts)
if [ -n "$$REDIS_PASSWORD" ]; then
exec redis-server \
--bind 0.0.0.0 \
--dir /data \
--save 900 1 \
--save 300 10 \
--save 60 10000 \
--appendonly yes \
--requirepass "$$REDIS_PASSWORD"
else
exec redis-server \
--bind 0.0.0.0 \
--dir /data \
--save 900 1 \
--save 300 10 \
--save 60 10000 \
--appendonly yes
fi
# Production resource limits
deploy:
resources:
@@ -204,10 +266,6 @@ services:
compress: "true"
labels: "service,environment"
# Networks must be explicitly defined to avoid override issues
networks:
- cache
queue-worker:
# Use same build as php service (has application code copied)
@@ -238,6 +296,16 @@ services:
- WORKER_DEBUG=false
- WORKER_SLEEP_TIME=100000
- WORKER_MAX_JOBS=10000
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- DB_PASSWORD_FILE=/run/secrets/db_user_password
- REDIS_PASSWORD_FILE=/run/secrets/redis_password
- APP_KEY_FILE=/run/secrets/app_key
- VAULT_ENCRYPTION_KEY_FILE=/run/secrets/vault_encryption_key
secrets:
- db_user_password
- redis_password
- app_key
- vault_encryption_key
# Production resource limits
deploy:
@@ -272,11 +340,6 @@ services:
php:
condition: service_healthy
# Networks must be explicitly defined to avoid override issues
networks:
- backend
- cache
# Certbot Sidecar Container for Let's Encrypt
certbot:
image: certbot/certbot:latest
@@ -306,15 +369,8 @@ services:
labels: "service,environment"
networks:
# Production networks with security isolation
frontend:
driver: bridge
backend:
driver: bridge
# NOTE: backend must NOT be internal - PHP needs to communicate with DB!
cache:
driver: bridge
internal: true # Cache network is internal
internal: true # Cache network is internal in production
volumes:
# Let's Encrypt SSL Certificates

410
docker-compose.staging.yml Normal file
View File

@@ -0,0 +1,410 @@
# Staging Environment Override
# Usage: docker-compose -f docker-compose.base.yml -f docker-compose.staging.yml up
#
# This file overrides base configuration with staging-specific settings:
# - Container names with "staging-" prefix
# - Traefik integration for staging.michaelschiemer.de
# - Git clone functionality for staging branch
# - Staging-specific networks (traefik-public, staging-internal)
# - Staging-specific volumes
services:
# PHP-FPM Application Runtime
staging-app:
image: git.michaelschiemer.de:5000/framework:latest
container_name: staging-app
restart: unless-stopped
networks:
- staging-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=staging
- APP_DEBUG=${APP_DEBUG:-true}
- APP_URL=https://staging.michaelschiemer.de
- APP_KEY=${APP_KEY:-}
# Git Repository - clones staging branch
- GIT_REPOSITORY_URL=${GIT_REPOSITORY_URL:-}
- GIT_BRANCH=staging
- GIT_TOKEN=${GIT_TOKEN:-}
- GIT_USERNAME=${GIT_USERNAME:-}
- GIT_PASSWORD=${GIT_PASSWORD:-}
# Database (can share with production or use separate)
- DB_HOST=${DB_HOST:-postgres}
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE:-michaelschiemer_staging}
- DB_USERNAME=${DB_USERNAME}
- DB_PASSWORD=${DB_PASSWORD}
# Redis
- REDIS_HOST=staging-redis
- REDIS_PORT=6379
- REDIS_PASSWORD=${REDIS_PASSWORD}
# Cache
- CACHE_DRIVER=redis
- CACHE_PREFIX=${CACHE_PREFIX:-staging}
# Session
- SESSION_DRIVER=redis
- SESSION_LIFETIME=${SESSION_LIFETIME:-120}
# Queue
- QUEUE_DRIVER=redis
- QUEUE_CONNECTION=default
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- DB_PASSWORD_FILE=/run/secrets/db_user_password
- APP_KEY_FILE=/run/secrets/app_key
- VAULT_ENCRYPTION_KEY_FILE=/run/secrets/vault_encryption_key
- GIT_TOKEN_FILE=/run/secrets/git_token
volumes:
- staging-code:/var/www/html
- staging-storage:/var/www/html/storage
- staging-logs:/var/www/html/storage/logs
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
secrets:
- db_user_password
- app_key
- vault_encryption_key
- git_token
# Override entrypoint to only start PHP-FPM (not nginx) + fix git ownership
entrypoint: ["/bin/sh", "-c"]
command:
- |
# Fix Git ownership issue
# Ensure Git treats the mounted repository as safe regardless of owner
git config --global --add safe.directory /var/www/html 2>/dev/null || true
git config --system --add safe.directory /var/www/html 2>/dev/null || true
# Git Clone/Pull functionality
if [ -n "$GIT_REPOSITORY_URL" ]; then
echo ""
echo "📥 Cloning/Pulling code from Git repository..."
GIT_BRANCH="${GIT_BRANCH:-main}"
GIT_TARGET_DIR="/var/www/html"
# Setup Git credentials
if [ -n "$GIT_TOKEN" ]; then
GIT_URL_WITH_AUTH=$(echo "$GIT_REPOSITORY_URL" | sed "s|https://|https://${GIT_TOKEN}@|")
elif [ -n "$GIT_USERNAME" ] && [ -n "$GIT_PASSWORD" ]; then
GIT_URL_WITH_AUTH=$(echo "$GIT_REPOSITORY_URL" | sed "s|https://|https://${GIT_USERNAME}:${GIT_PASSWORD}@|")
else
GIT_URL_WITH_AUTH="$GIT_REPOSITORY_URL"
fi
# Clone or pull
if [ ! -d "$GIT_TARGET_DIR/.git" ]; then
echo "📥 Cloning repository from $GIT_REPOSITORY_URL (branch: $GIT_BRANCH)..."
if [ "$(ls -A $GIT_TARGET_DIR 2>/dev/null)" ]; then
find "$GIT_TARGET_DIR" -mindepth 1 -maxdepth 1 ! -name "storage" -exec rm -rf {} \; 2>/dev/null || true
fi
TEMP_CLONE="${GIT_TARGET_DIR}.tmp"
rm -rf "$TEMP_CLONE" 2>/dev/null || true
if git -c safe.directory=/var/www/html clone --branch "$GIT_BRANCH" --depth 1 "$GIT_URL_WITH_AUTH" "$TEMP_CLONE"; then
find "$GIT_TARGET_DIR" -mindepth 1 -maxdepth 1 ! -name "storage" -exec rm -rf {} \; 2>/dev/null || true
find "$TEMP_CLONE" -mindepth 1 -maxdepth 1 ! -name "." ! -name ".." -exec mv {} "$GIT_TARGET_DIR/" \; 2>/dev/null || true
rm -rf "$TEMP_CLONE" 2>/dev/null || true
echo "✅ Repository cloned successfully"
fi
else
echo "🔄 Pulling latest changes from $GIT_BRANCH..."
cd "$GIT_TARGET_DIR"
git -c safe.directory=/var/www/html fetch origin "$GIT_BRANCH" || echo "⚠️ Git fetch failed"
git -c safe.directory=/var/www/html reset --hard "origin/$GIT_BRANCH" || echo "⚠️ Git reset failed"
git -c safe.directory=/var/www/html clean -fd || true
fi
# Install dependencies
if [ -f "$GIT_TARGET_DIR/composer.json" ]; then
echo "📦 Installing/updating Composer dependencies..."
cd "$GIT_TARGET_DIR"
composer install --no-dev --optimize-autoloader --no-interaction --no-scripts || echo "⚠️ Composer install failed"
composer dump-autoload --optimize --classmap-authoritative || true
fi
echo "✅ Git sync completed"
else
echo ""
echo " GIT_REPOSITORY_URL not set, using code from image"
fi
echo ""
echo "📊 Environment variables:"
env | grep -E "DB_|APP_" | grep -v "PASSWORD|KEY|SECRET" || true
echo ""
echo "🛠️ Adjusting filesystem permissions..."
chown -R www-data:www-data /var/www/html/storage /var/www/html/bootstrap/cache 2>/dev/null || true
find /var/www/html/storage /var/www/html/bootstrap/cache -type d -exec chmod 775 {} \; 2>/dev/null || true
find /var/www/html/storage /var/www/html/bootstrap/cache -type f -exec chmod 664 {} \; 2>/dev/null || true
# Start PHP-FPM only (no nginx)
echo ""
echo "🚀 Starting PHP-FPM..."
exec php-fpm
healthcheck:
test: ["CMD-SHELL", "php-fpm-healthcheck || true"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
depends_on:
staging-redis:
condition: service_started
# Nginx Web Server
staging-nginx:
image: git.michaelschiemer.de:5000/framework:latest
container_name: staging-nginx
restart: unless-stopped
networks:
- traefik-public
- staging-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=staging
- APP_DEBUG=${APP_DEBUG:-true}
# Git Repository - clones staging branch
- GIT_REPOSITORY_URL=${GIT_REPOSITORY_URL:-}
- GIT_BRANCH=staging
- GIT_TOKEN=${GIT_TOKEN:-}
- GIT_USERNAME=${GIT_USERNAME:-}
- GIT_PASSWORD=${GIT_PASSWORD:-}
volumes:
- ./deployment/stacks/staging/nginx/conf.d:/etc/nginx/conf.d:ro
- staging-code:/var/www/html:ro
- staging-storage:/var/www/html/storage:ro
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
# Wait for code to be available (cloned by staging-app container) then start nginx
entrypoint: ["/bin/sh", "-c"]
command:
- |
# Wait for code to be available in shared volume (staging-app container clones it)
GIT_TARGET_DIR="/var/www/html"
echo "⏳ [staging-nginx] Waiting for code to be available in shared volume..."
for i in 1 2 3 4 5 6 7 8 9 10; do
if [ -d "$$GIT_TARGET_DIR/public" ]; then
echo "✅ [staging-nginx] Code found in shared volume"
break
fi
echo " [staging-nginx] Waiting... ($$i/10)"
sleep 2
done
# If code still not available after wait, try to copy from image as fallback
if [ ! -d "$$GIT_TARGET_DIR/public" ] && [ -d "/var/www/html.orig" ]; then
echo "⚠️ [staging-nginx] Code not found in shared volume, copying from image..."
find /var/www/html.orig -mindepth 1 -maxdepth 1 ! -name "storage" -exec cp -r {} "$$GIT_TARGET_DIR/" \; 2>/dev/null || true
fi
# Fix nginx upstream configuration - sites-enabled/default overrides conf.d/default.conf
# This is critical: nginx sites-available/default uses 127.0.0.1:9000 but PHP-FPM runs in staging-app container
if [ -f "/etc/nginx/sites-available/default" ]; then
echo "🔧 [staging-nginx] Fixing PHP-FPM upstream configuration..."
# Replace in upstream block
sed -i '/upstream php-upstream {/,/}/s|server 127.0.0.1:9000;|server staging-app:9000;|g' /etc/nginx/sites-available/default || true
sed -i '/upstream php-upstream {/,/}/s|server localhost:9000;|server staging-app:9000;|g' /etc/nginx/sites-available/default || true
# Replace any direct fastcgi_pass references too
sed -i 's|fastcgi_pass 127.0.0.1:9000;|fastcgi_pass php-upstream;|g' /etc/nginx/sites-available/default || true
sed -i 's|fastcgi_pass localhost:9000;|fastcgi_pass php-upstream;|g' /etc/nginx/sites-available/default || true
echo "✅ [staging-nginx] PHP-FPM upstream fixed"
fi
# Start nginx only (no PHP-FPM, no Git clone - staging-app container handles that)
echo "🚀 [staging-nginx] Starting nginx..."
exec nginx -g "daemon off;"
labels:
- "traefik.enable=true"
# HTTP Router for staging subdomain
- "traefik.http.routers.staging.rule=Host(`staging.michaelschiemer.de`)"
- "traefik.http.routers.staging.entrypoints=websecure"
- "traefik.http.routers.staging.tls=true"
- "traefik.http.routers.staging.tls.certresolver=letsencrypt"
# Service
- "traefik.http.services.staging.loadbalancer.server.port=80"
# Middleware
- "traefik.http.routers.staging.middlewares=default-chain@file"
# Network
- "traefik.docker.network=traefik-public"
healthcheck:
test: ["CMD-SHELL", "curl -f http://127.0.0.1/health || exit 1"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
depends_on:
staging-app:
condition: service_started
# Remove base service dependencies and build
ports: []
# Redis Cache/Session/Queue Backend (separate from production)
staging-redis:
image: redis:7-alpine
container_name: staging-redis
restart: unless-stopped
networks:
- staging-internal
environment:
- TZ=Europe/Berlin
- REDIS_PASSWORD_FILE=/run/secrets/redis_password
secrets:
- redis_password
command: >
sh -c "
REDIS_PASSWORD=$$(cat /run/secrets/redis_password 2>/dev/null || echo ${REDIS_PASSWORD})
redis-server
--requirepass $$REDIS_PASSWORD
--maxmemory 256mb
--maxmemory-policy allkeys-lru
--save 900 1
--save 300 10
--save 60 10000
--appendonly yes
--appendfsync everysec
"
volumes:
- staging-redis-data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
# Queue Worker (Background Jobs)
staging-queue-worker:
image: git.michaelschiemer.de:5000/framework:latest
container_name: staging-queue-worker
restart: unless-stopped
networks:
- staging-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=staging
- APP_DEBUG=${APP_DEBUG:-true}
# Database
- DB_HOST=${DB_HOST:-postgres}
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE:-michaelschiemer_staging}
- DB_USERNAME=${DB_USERNAME}
- DB_PASSWORD=${DB_PASSWORD}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- DB_PASSWORD_FILE=/run/secrets/db_user_password
# Redis
- REDIS_HOST=staging-redis
- REDIS_PORT=6379
- REDIS_PASSWORD=${REDIS_PASSWORD}
- REDIS_PASSWORD_FILE=/run/secrets/redis_password
# Queue
- QUEUE_DRIVER=redis
- QUEUE_CONNECTION=default
- QUEUE_WORKER_SLEEP=${QUEUE_WORKER_SLEEP:-3}
- QUEUE_WORKER_TRIES=${QUEUE_WORKER_TRIES:-3}
- QUEUE_WORKER_TIMEOUT=${QUEUE_WORKER_TIMEOUT:-60}
volumes:
- staging-code:/var/www/html
- staging-storage:/var/www/html/storage
- staging-logs:/var/www/html/storage/logs
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
command: php console.php queue:work --queue=default --timeout=${QUEUE_WORKER_TIMEOUT:-60}
healthcheck:
test: ["CMD-SHELL", "php -r 'exit(0);' && test -f /var/www/html/console.php || exit 1"]
interval: 60s
timeout: 10s
retries: 3
start_period: 30s
depends_on:
staging-app:
condition: service_started
staging-redis:
condition: service_started
entrypoint: ""
stop_grace_period: 30s
secrets:
- db_user_password
- redis_password
- app_key
- vault_encryption_key
# Scheduler (Cron Jobs)
staging-scheduler:
image: git.michaelschiemer.de:5000/framework:latest
container_name: staging-scheduler
restart: unless-stopped
networks:
- staging-internal
environment:
- TZ=Europe/Berlin
- APP_ENV=staging
- APP_DEBUG=${APP_DEBUG:-true}
# Database
- DB_HOST=${DB_HOST:-postgres}
- DB_PORT=${DB_PORT:-5432}
- DB_DATABASE=${DB_DATABASE:-michaelschiemer_staging}
- DB_USERNAME=${DB_USERNAME}
- DB_PASSWORD=${DB_PASSWORD}
# Use Docker Secrets via *_FILE pattern (Framework supports this automatically)
- DB_PASSWORD_FILE=/run/secrets/db_user_password
# Redis
- REDIS_HOST=staging-redis
- REDIS_PORT=6379
- REDIS_PASSWORD=${REDIS_PASSWORD}
- REDIS_PASSWORD_FILE=/run/secrets/redis_password
volumes:
- staging-code:/var/www/html
- staging-storage:/var/www/html/storage
- staging-logs:/var/www/html/storage/logs
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
command: php console.php scheduler:run
healthcheck:
test: ["CMD-SHELL", "php -r 'exit(0);' && test -f /var/www/html/console.php || exit 1"]
interval: 60s
timeout: 10s
retries: 3
start_period: 30s
depends_on:
staging-app:
condition: service_started
staging-redis:
condition: service_started
entrypoint: ""
stop_grace_period: 30s
secrets:
- db_user_password
- redis_password
- app_key
- vault_encryption_key
# Remove base services that are not needed in staging
web:
profiles:
- never
php:
profiles:
- never
db:
profiles:
- never
redis:
profiles:
- never
queue-worker:
profiles:
- never
minio:
profiles:
- never
networks:
traefik-public:
external: true
staging-internal:
driver: bridge
volumes:
staging-code:
name: staging-code
staging-storage:
name: staging-storage
staging-logs:
name: staging-logs
staging-redis-data:
name: staging-redis-data

View File

@@ -1,3 +1,36 @@
# ⚠️ DEPRECATED - Legacy Docker Compose Configuration ⚠️
#
# This file is DEPRECATED and kept ONLY for backward compatibility during migration.
# ⚠️ DO NOT USE THIS FILE FOR NEW DEPLOYMENTS ⚠️
#
# This file will be REMOVED after the migration period (planned: Q2 2025).
# All developers must migrate to the Base+Override Pattern before then.
#
# ✅ PREFERRED: Use Base+Override Pattern:
# - docker-compose.base.yml (shared services)
# - docker-compose.local.yml (local development overrides)
# - docker-compose.staging.yml (staging overrides)
# - docker-compose.production.yml (production overrides)
#
# 📖 Usage:
# Local: docker compose -f docker-compose.base.yml -f docker-compose.local.yml up
# Staging: docker compose -f docker-compose.base.yml -f docker-compose.staging.yml up
# Production: docker compose -f docker-compose.base.yml -f docker-compose.production.yml up
#
# 🔗 See deployment/README.md for details on the Base+Override Pattern
# 🔗 See ENV_SETUP.md for environment configuration guide
#
# ⚠️ Migration Required:
# 1. Create .env.base from .env.example (run: make env-base)
# 2. Create .env.local for local overrides (run: make env-local)
# 3. Update all docker compose commands to use Base+Override files
# 4. Test your local setup before removing this legacy file
#
# 📅 Deprecation Timeline:
# - Created: Base+Override Pattern introduced
# - Planned Removal: Q2 2025 (after all developers have migrated)
# - Action Required: Migrate before removal date
services:
web:
container_name: web
@@ -30,6 +63,9 @@ services:
networks:
- frontend
- backend
# Legacy .env file (Fallback for backward compatibility)
# Preferred: Use docker-compose.base.yml + docker-compose.local.yml
# See ENV_SETUP.md for new Base+Override Pattern
env_file:
- .env
deploy:
@@ -90,6 +126,8 @@ services:
networks:
- backend
- cache
# Legacy .env file (Fallback for backward compatibility)
# Preferred: Use docker-compose.base.yml + docker-compose.local.yml
env_file:
- .env
deploy:
@@ -124,6 +162,7 @@ services:
networks:
- backend
- cache
# Legacy .env file (Fallback for backward compatibility)
env_file:
- .env
entrypoint: []
@@ -193,6 +232,7 @@ services:
max-file: "${LOG_MAX_FILE:-2}"
networks:
- cache
# Legacy .env file (Fallback for backward compatibility)
env_file:
- .env
deploy:
@@ -236,6 +276,7 @@ services:
networks:
- backend
- cache
# Legacy .env file (Fallback for backward compatibility)
env_file:
- .env
# Graceful shutdown timeout

View File

@@ -1,31 +1,34 @@
#!/bin/bash
set -e
echo "🔐 Loading secrets from /run/secrets/..."
echo "🔐 Loading secrets..."
# Function to load secret from file if *_FILE env var is set
load_secret() {
# This is a fallback for environments where Docker Secrets are not configured
# The Framework's DockerSecretsResolver handles *_FILE pattern automatically
load_secret_fallback() {
local var_name="$1"
local file_var="${var_name}_FILE"
if [ -n "${!file_var}" ] && [ -f "${!file_var}" ]; then
# Only load manually if *_FILE is set but Framework hasn't loaded it yet
# (This is mainly for backward compatibility during migration)
if [ -n "${!file_var}" ] && [ -f "${!file_var}" ] && [ -z "${!var_name}" ]; then
export "$var_name"="$(cat "${!file_var}")"
echo "✅ Loaded $var_name from ${!file_var}"
echo "✅ Loaded $var_name from ${!file_var} (fallback)"
fi
}
# Load database password from secret file
load_secret "DB_PASSWORD"
# Load secrets as fallback (Framework handles *_FILE pattern automatically via DockerSecretsResolver)
# This is mainly for backward compatibility during migration
load_secret_fallback "DB_PASSWORD"
load_secret_fallback "REDIS_PASSWORD"
load_secret_fallback "APP_KEY"
load_secret_fallback "VAULT_ENCRYPTION_KEY"
load_secret_fallback "SHOPIFY_WEBHOOK_SECRET"
load_secret_fallback "RAPIDMAIL_PASSWORD"
load_secret_fallback "GIT_TOKEN"
# Load other secrets
load_secret "REDIS_PASSWORD"
load_secret "APP_KEY"
load_secret "VAULT_ENCRYPTION_KEY"
load_secret "SHOPIFY_WEBHOOK_SECRET"
load_secret "RAPIDMAIL_PASSWORD"
load_secret "GIT_TOKEN"
echo "✅ All secrets loaded"
echo "✅ Secrets loading completed (Framework handles *_FILE pattern automatically)"
# Git Clone/Pull functionality
if [ -n "$GIT_REPOSITORY_URL" ]; then

View File

@@ -0,0 +1,243 @@
# Autossh Setup - Abgeschlossen
**Datum**: 2025-11-02
**Status**: ? Erfolgreich konfiguriert
**Server**: Production (94.16.110.151)
---
## Durchgef?hrte Schritte
### 1. Installation von Autossh
Autossh war bereits auf dem System installiert:
```bash
/usr/bin/autossh
```
### 2. SSH-Konfiguration erweitert
Die SSH-Config (`~/.ssh/config`) wurde erweitert mit folgenden Eintr?gen:
```ssh-config
Host production
HostName 94.16.110.151
User deploy
IdentityFile ~/.ssh/production
ServerAliveInterval 60
ServerAliveCountMax 3
TCPKeepAlive yes
Compression yes
StrictHostKeyChecking accept-new
```
**Wichtige Optionen:**
- `ServerAliveInterval 60`: Sendet alle 60 Sekunden ein Keep-Alive-Signal
- `ServerAliveCountMax 3`: Nach 3 fehlgeschlagenen Versuchen aufgeben
- `TCPKeepAlive yes`: Nutzt TCP Keep-Alive f?r zus?tzliche Persistenz
### 3. Systemd Service erstellt
Systemd Service wurde erstellt unter:
```
~/.config/systemd/user/autossh-production.service
```
**Service-Konfiguration:**
```ini
[Unit]
Description=AutoSSH for production
After=network.target
[Service]
Type=simple
Environment="AUTOSSH_GATETIME=0"
Environment="AUTOSSH_POLL=10"
ExecStart=/usr/bin/autossh -M 20000 -N -o "ServerAliveInterval=60" -o "ServerAliveCountMax=3" production
Restart=always
RestartSec=10
[Install]
WantedBy=default.target
```
**Wichtige Parameter:**
- `-M 20000`: Monitoring-Port (autossh nutzt diesen zur Verbindungs?berwachung)
- `-N`: Keine Remote-Commands ausf?hren (nur persistente Verbindung)
- `AUTOSSH_GATETIME=0`: Keine Wartezeit nach Start (sofortige Verbindung)
- `AUTOSSH_POLL=10`: Polling-Intervall in Sekunden
**Hinweis**: Das `-f` Flag wurde entfernt, da es mit systemd Type=simple nicht kompatibel ist.
### 4. Service aktiviert und gestartet
```bash
# Service aktivieren (startet automatisch beim Login)
systemctl --user enable autossh-production.service
# Service starten
systemctl --user start autossh-production.service
```
### 5. Status ?berpr?ft
Service Status:
```
? autossh-production.service - AutoSSH for production
Loaded: loaded (/home/michael/.config/systemd/user/autossh-production.service; enabled; preset: enabled)
Active: active (running) since Sun 2025-11-02 18:21:06 CET
Main PID: 35533 (autossh)
Tasks: 2 (limit: 14999)
Memory: 1.7M
```
**Laufende Prozesse:**
- Autossh Main Process: PID 35533
- SSH Connection Process: PID 35537
---
## Verbindungstest
SSH-Verbindung erfolgreich getestet:
```bash
ssh production "echo 'Connection test successful'"
# Output: Connection test successful
```
---
## Service-Management
### Status pr?fen
```bash
systemctl --user status autossh-production.service
```
### Logs anzeigen
```bash
journalctl --user -u autossh-production.service -f
```
### Service stoppen
```bash
systemctl --user stop autossh-production.service
```
### Service neu starten
```bash
systemctl --user restart autossh-production.service
```
### Service deaktivieren
```bash
systemctl --user disable autossh-production.service
```
---
## Funktionsweise
Autossh ?berwacht die SSH-Verbindung kontinuierlich:
1. **Monitoring-Port**: Port 20000 wird genutzt, um die Verbindung zu ?berwachen
2. **Keep-Alive**: Alle 60 Sekunden wird ein Keep-Alive-Signal gesendet
3. **Automatischer Neustart**: Bei Verbindungsabbruch wird die Verbindung automatisch neu aufgebaut
4. **Systemd Integration**: Bei Service-Fehler startet systemd den Service nach 10 Sekunden neu
---
## Bekannte Probleme & L?sungen
### Problem 1: Monitoring-Port Format
**Fehler**: `invalid port "127.0.0.1"`
**L?sung**: `-M` Parameter sollte nur die Port-Nummer sein, nicht `IP:Port`
```bash
# Falsch:
-M 127.0.0.1:20000
# Richtig:
-M 20000
```
### Problem 2: `-f` Flag mit systemd
**Fehler**: Service startet, beendet sich aber sofort
**L?sung**: `-f` Flag entfernen bei systemd Type=simple (systemd ?bernimmt Background-Operation)
### Problem 3: Service startet nicht automatisch
**L?sung**: User lingering aktivieren f?r automatischen Start ohne Login:
```bash
sudo loginctl enable-linger $USER
```
---
## N?chste Schritte
### F?r weitere Server
Das Setup-Script kann f?r weitere Server verwendet werden:
```bash
./scripts/setup-autossh.sh production # Nur Production
./scripts/setup-autossh.sh git # Nur Git Server
./scripts/setup-autossh.sh both # Beide
```
### SSH-Tunnel einrichten
Falls SSH-Tunnel ben?tigt werden (z.B. Port-Forwarding):
```bash
# Lokalen Port weiterleiten
autossh -M 20002 -N -L 8080:localhost:80 production
```
### Monitoring
Regelm??ig den Service-Status ?berpr?fen:
```bash
systemctl --user status autossh-production.service
journalctl --user -u autossh-production.service --since "1 hour ago"
```
---
## Makefile-Befehle
Das Projekt bietet jetzt folgende Makefile-Befehle f?r SSH-Verbindungen:
```bash
# SSH-Verbindung zum Production-Server ?ffnen
make ssh
# oder
make ssh-production
# SSH-Verbindung zum Git-Server ?ffnen
make ssh-git
# Status der autossh-Services pr?fen
make ssh-status
# Logs der autossh-Services anzeigen
make ssh-logs
# Autossh einrichten
make setup-autossh
```
## Referenzen
- **Setup-Script**: `scripts/setup-autossh.sh`
- **Dokumentation**: `docs/deployment/AUTOSSH-SETUP.md`
- **SSH-Config**: `~/.ssh/config`
- **Service-Datei**: `~/.config/systemd/user/autossh-production.service`
- **Makefile**: `Makefile` (Befehle: `ssh`, `ssh-status`, `ssh-logs`, `setup-autossh`)
---
## Zusammenfassung
? Autossh erfolgreich installiert
? SSH-Config mit Keep-Alive-Optionen erweitert
? Systemd Service erstellt und konfiguriert
? Service aktiviert und gestartet
? Verbindungstest erfolgreich
? Automatischer Neustart bei Verbindungsabbruch aktiviert
Die SSH-Verbindung zum Production-Server wird jetzt automatisch ?berwacht und bei Abbruch neu aufgebaut.

View File

@@ -0,0 +1,428 @@
# Autossh Setup - Persistente SSH-Verbindungen
**Status**: ? Ready
**Last Updated**: 2025-01-31
**Purpose**: Automatische ?berwachung und Neustart von SSH-Verbindungen zum Production-Server
---
## ?bersicht
Autossh ist ein Tool, das SSH-Verbindungen automatisch ?berwacht und neu aufbaut, wenn sie abbrechen. Dies ist besonders n?tzlich f?r:
- SSH-Tunnel zu entfernten Servern
- Persistente SSH-Verbindungen f?r Ansible/CI/CD
- Automatische Verbindungswiederherstellung nach Netzwerkunterbrechungen
---
## Installation
### Linux (Ubuntu/Debian)
```bash
sudo apt update
sudo apt install autossh
```
### macOS
```bash
brew install autossh
```
### WSL2 / Windows
Autossh ist normalerweise ?ber das Linux-Subsystem verf?gbar. Falls nicht:
```bash
# In WSL2
sudo apt update
sudo apt install autossh
```
---
## Konfiguration
### Schritt 1: SSH-Config erweitern
Erweitere deine `~/.ssh/config` mit Keep-Alive und ServerAliveInterval Optionen:
```bash
# Edit SSH config
nano ~/.ssh/config
```
F?ge folgende Konfiguration hinzu:
```
# Production Server - Persistent Connection
Host production
HostName 94.16.110.151
User deploy
IdentityFile ~/.ssh/production
ServerAliveInterval 60
ServerAliveCountMax 3
TCPKeepAlive yes
Compression yes
StrictHostKeyChecking accept-new
# Git Server - Persistent Connection
Host git.michaelschiemer.de
HostName git.michaelschiemer.de
Port 2222
User git
IdentityFile ~/.ssh/git_michaelschiemer
ServerAliveInterval 60
ServerAliveCountMax 3
TCPKeepAlive yes
Compression yes
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
```
**Wichtige Optionen:**
- `ServerAliveInterval 60`: Sendet alle 60 Sekunden ein Keep-Alive-Signal
- `ServerAliveCountMax 3`: Gibt nach 3 fehlgeschlagenen Keep-Alive-Versuchen auf
- `TCPKeepAlive yes`: Nutzt TCP Keep-Alive f?r zus?tzliche Persistenz
### Schritt 2: Autossh als Service einrichten
#### Option A: Systemd Service (Linux/WSL2)
Erstelle einen systemd Service f?r autossh:
```bash
# Create systemd service directory
mkdir -p ~/.config/systemd/user
# Create service file
nano ~/.config/systemd/user/autossh-production.service
```
Service-Datei Inhalt:
```ini
[Unit]
Description=AutoSSH for Production Server
After=network.target
[Service]
Type=simple
Environment="AUTOSSH_GATETIME=0"
Environment="AUTOSSH_POLL=10"
ExecStart=/usr/bin/autossh -M 20000 -N -o "ServerAliveInterval=60" -o "ServerAliveCountMax=3" production
Restart=always
RestartSec=10
[Install]
WantedBy=default.target
```
**Wichtige Hinweise:**
- `-M 20000`: Monitoring-Port (nur Port-Nummer, nicht IP:Port!)
- `-N`: Keine Remote-Commands (nur persistente Verbindung)
- **Kein `-f` Flag**: Bei systemd Type=simple wird `-f` nicht ben?tigt, da systemd die Background-Operation ?bernimmt
**Service aktivieren:**
```bash
# Reload systemd user services
systemctl --user daemon-reload
# Enable service (startet automatisch beim Login)
systemctl --user enable autossh-production.service
# Start service
systemctl --user start autossh-production.service
# Check status
systemctl --user status autossh-production.service
# View logs
journalctl --user -u autossh-production.service -f
```
#### Option B: Manuelle Autossh-Verbindung
F?r manuelle/tempor?re Verbindungen:
```bash
# Start autossh mit Monitoring-Port
autossh -M 20000 -N -f -o "ServerAliveInterval=60" -o "ServerAliveCountMax=3" production
# Check if running
ps aux | grep autossh
# Stop autossh
pkill autossh
```
**Parameter-Erkl?rung:**
- `-M 20000`: Monitoring-Port (autossh nutzt diesen zum Health-Check)
- `-N`: Keine Remote-Commands ausf?hren (nur Tunnel)
- `-f`: Im Hintergrund laufen
- `-o "ServerAliveInterval=60"`: SSH Keep-Alive alle 60 Sekunden
- `-o "ServerAliveCountMax=3"`: Nach 3 Fehlversuchen aufgeben
#### Option C: SSH-Tunnel mit Autossh
F?r SSH-Tunnel (z.B. Port-Forwarding):
```bash
# Forward local port 8080 to remote 80
autossh -M 20000 -N -f -L 8080:localhost:80 production
# Forward remote port 3306 to local
autossh -M 20000 -N -f -R 3306:localhost:3306 production
# Check tunnel
ps aux | grep autossh
ss -tuln | grep 8080
```
---
## Testing
### Verbindung testen
```bash
# Test normal SSH
ssh production "echo 'Connection successful'"
# Test autossh connection
autossh -M 20000 -v -N -o "ServerAliveInterval=60" production
# Check if autossh is monitoring
ps aux | grep autossh
netstat -tuln | grep 20000
```
### Verbindungsstatus ?berwachen
```bash
# Check active SSH connections
ssh production "who"
# Check autossh process
ps aux | grep autossh
# Check systemd service status
systemctl --user status autossh-production.service
# View logs
journalctl --user -u autossh-production.service --since "10 minutes ago"
```
---
## Troubleshooting
### Autossh startet nicht
**Problem**: Autossh-Process startet nicht oder crasht sofort
**L?sung**:
```bash
# Test SSH-Verbindung manuell
ssh -v production "echo test"
# Test autossh mit verbose logging
autossh -M 20000 -v -N production
# Pr?fe SSH-Config
ssh -F ~/.ssh/config production "echo test"
# Pr?fe Berechtigungen
ls -la ~/.ssh/production
chmod 600 ~/.ssh/production
```
### Verbindung bricht trotzdem ab
**Problem**: Verbindung bricht auch mit autossh regelm??ig ab
**L?sung**:
1. **Erh?he Keep-Alive-Interval:**
```bash
# In ~/.ssh/config
ServerAliveInterval 30
ServerAliveCountMax 10
```
2. **Pr?fe Netzwerk/Firewall:**
```bash
# Test network connectivity
ping 94.16.110.151
# Test SSH port
nc -zv 94.16.110.151 22
```
3. **Pr?fe Server-Konfiguration:**
```bash
# Auf dem Server pr?fen
ssh production "cat /etc/ssh/sshd_config | grep -E 'ClientAlive|TCPKeepAlive'"
```
### Port-Konflikte
**Problem**: Monitoring-Port (20000) ist bereits belegt
**L?sung**:
```bash
# W?hle einen anderen Port
autossh -M 20001 -N -f production
# Oder nutze einen zuf?lligen Port
autossh -M 0 -N -f production # 0 = random port
```
---
## Best Practices
### 1. Monitoring-Port anpassen
Wenn mehrere autossh-Instanzen laufen, nutze verschiedene Monitoring-Ports:
```bash
# Production Server
autossh -M 20000 -N -f production
# Git Server
autossh -M 20001 -N -f git.michaelschiemer.de
```
### 2. Systemd Service f?r Produktivit?t
Nutze systemd Services f?r automatischen Start:
```bash
# Enable lingering f?r user services
sudo loginctl enable-linger $USER
# Services starten beim Boot
systemctl --user enable autossh-production.service
```
### 3. Logging konfigurieren
F?r besseres Debugging:
```bash
# Systemd service mit logging
[Service]
ExecStart=/usr/bin/autossh -M 20000 -v -N -o "ServerAliveInterval=60" -o "LogLevel=DEBUG" production
StandardOutput=journal
StandardError=journal
```
### 4. Automatischer Neustart
Systemd Service startet automatisch neu, aber f?r manuelle Instanzen:
```bash
# Mit automatischem Restart
while true; do
autossh -M 20000 -N production || sleep 10
done
```
---
## Integration mit Ansible
Autossh kann auch f?r Ansible-Verbindungen genutzt werden:
```yaml
# ansible.cfg
[defaults]
transport = ssh
pipelining = True
ssh_args = -o ServerAliveInterval=60 -o ServerAliveCountMax=3
control_path = ~/.ansible/cp/%%h-%%p-%%r
```
Oder nutze die SSH-Config direkt (empfohlen):
```bash
# ~/.ssh/config ist bereits f?r Ansible nutzbar
ansible production -m ping
```
---
## Sicherheitshinweise
1. **SSH-Keys sch?tzen:**
```bash
chmod 600 ~/.ssh/production
chmod 644 ~/.ssh/production.pub
```
2. **Monitoring-Port absichern:**
```bash
# Monitoring-Port nur lokal verf?gbar
autossh -M 127.0.0.1:20000 -N -f production
```
3. **Keine Passw?rter:**
- Nutze immer SSH-Keys
- Keine Passw?rter in autossh-Commands
---
## Quick Reference
### Makefile-Befehle
```bash
# SSH-Verbindung zum Production-Server
make ssh
# oder
make ssh-production
# SSH-Verbindung zum Git-Server
make ssh-git
# Status der autossh-Services pr?fen
make ssh-status
# Logs der autossh-Services anzeigen
make ssh-logs
# Autossh einrichten
make setup-autossh
```
### Manuelle Befehle
```bash
# Service starten
systemctl --user start autossh-production.service
# Service stoppen
systemctl --user stop autossh-production.service
# Service Status
systemctl --user status autossh-production.service
# Logs anzeigen
journalctl --user -u autossh-production.service -f
# Manuelle Verbindung (ohne systemd)
autossh -M 20000 -N -f production
# Verbindung beenden
pkill autossh
```
---
## Weitere Ressourcen
- [Autossh Manual](https://www.harding.motd.ca/autossh/)
- [SSH Keep-Alive Documentation](https://www.ssh.com/academy/ssh/config)
- [Systemd User Services](https://wiki.archlinux.org/title/Systemd/User)

View File

@@ -0,0 +1,319 @@
# SSH Makefile-Befehle
**Datum**: 2025-11-02
**Status**: ? Verf?gbar
**Zweck**: Einfache SSH-Verbindungen ?ber Makefile-Befehle
---
## ?bersicht
Das Projekt bietet Makefile-Befehle f?r SSH-Verbindungen zum Production- und Git-Server. Diese nutzen die konfigurierte SSH-Config (`~/.ssh/config`) und autossh f?r persistente Verbindungen.
---
## Verf?gbare Befehle
### `make ssh` oder `make ssh-production`
?ffnet eine SSH-Verbindung zum Production-Server.
```bash
make ssh
```
**Was passiert:**
- Nutzt die SSH-Config (`~/.ssh/config`) mit dem `production` Host
- Verbindet zu `94.16.110.151` als User `deploy`
- Nutzt den SSH-Schl?ssel `~/.ssh/production`
- Keep-Alive aktiviert (ServerAliveInterval 60)
**Beispiel:**
```bash
$ make ssh
?? Verbinde zum Production-Server...
Welcome to Ubuntu...
deploy@production:~$
```
---
### `make ssh-git`
?ffnet eine SSH-Verbindung zum Git-Server.
```bash
make ssh-git
```
**Was passiert:**
- Nutzt die SSH-Config mit dem `git.michaelschiemer.de` Host
- Verbindet zu `git.michaelschiemer.de` Port 2222 als User `git`
- Nutzt den SSH-Schl?ssel `~/.ssh/git_michaelschiemer`
---
### `make ssh-status`
Pr?ft den Status der autossh-Services.
```bash
make ssh-status
```
**Ausgabe:**
```bash
?? Pr?fe autossh Service-Status...
? autossh-production.service - AutoSSH for production
Loaded: loaded (/home/michael/.config/systemd/user/autossh-production.service; enabled; preset: enabled)
Active: active (running) since Sun 2025-11-02 18:21:06 CET
Main PID: 35533 (autossh)
Tasks: 2 (limit: 14999)
Memory: 1.8M
michael 35533 0.0 0.0 2484 1536 ? Ss 18:21 0:00 /usr/lib/autossh/autossh -M 20000 -N -o ServerAliveInterval=60 -o ServerAliveCountMax=3 production
```
---
### `make ssh-logs`
Zeigt die Logs der autossh-Services an.
```bash
make ssh-logs
```
**Ausgabe:**
```bash
?? Zeige autossh Logs...
Nov 02 18:21:06 Mike-PC systemd[19787]: Started autossh-production.service - AutoSSH for production.
Nov 02 18:21:06 Mike-PC autossh[35533]: short poll time: adjusting net timeouts to 5000
Nov 02 18:21:06 Mike-PC autossh[35533]: starting ssh (count 1)
Nov 02 18:21:06 Mike-PC autossh[35533]: ssh child pid is 35537
```
**F?r Live-Logs:**
```bash
journalctl --user -u autossh-production.service -f
```
---
### `make setup-autossh`
Richtet autossh f?r persistente SSH-Verbindungen ein.
```bash
make setup-autossh
```
**Was passiert:**
- F?hrt das Setup-Script aus (`scripts/setup-autossh.sh both`)
- Erweitert SSH-Config mit Keep-Alive-Optionen
- Erstellt systemd Services f?r Production- und Git-Server
- Testet SSH-Verbindungen
**Siehe auch:** `docs/deployment/AUTOSSH-SETUP.md`
---
## SSH-Config
Die Makefile-Befehle nutzen die SSH-Config (`~/.ssh/config`):
### Production-Server
```ssh-config
Host production
HostName 94.16.110.151
User deploy
IdentityFile ~/.ssh/production
ServerAliveInterval 60
ServerAliveCountMax 3
TCPKeepAlive yes
Compression yes
StrictHostKeyChecking accept-new
```
### Git-Server
```ssh-config
Host git.michaelschiemer.de
HostName git.michaelschiemer.de
Port 2222
User git
IdentityFile ~/.ssh/git_michaelschiemer
ServerAliveInterval 60
ServerAliveCountMax 3
TCPKeepAlive yes
Compression yes
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
```
---
## Erweiterte Nutzung
### SSH mit zus?tzlichen Befehlen
Du kannst auch direkt `ssh` mit zus?tzlichen Befehlen verwenden:
```bash
# Remote-Befehl ausf?hren
ssh production "docker ps"
# SSH-Tunnel erstellen
ssh production -L 8080:localhost:80 -N
# Datei kopieren (SCP)
scp production:/path/to/file ./local-file
# Datei hochladen
scp ./local-file production:/path/to/file
```
### Mit dem Production-Server arbeiten
```bash
# Docker-Container Status pr?fen
make ssh
# Dann im SSH:
docker ps
cd /var/www/html && docker compose ps
# Logs anzeigen
cd ~/deployment/stacks/application && docker compose logs -f
```
---
## Troubleshooting
### SSH-Verbindung schl?gt fehl
**Problem**: `make ssh` verbindet nicht
**L?sung**:
1. Pr?fe SSH-Config:
```bash
cat ~/.ssh/config | grep -A 10 "Host production"
```
2. Teste Verbindung manuell:
```bash
ssh -v production
```
3. Pr?fe SSH-Schl?ssel:
```bash
ls -la ~/.ssh/production
```
4. Teste mit IP-Adresse:
```bash
ssh -i ~/.ssh/production deploy@94.16.110.151
```
### Autossh l?uft nicht
**Problem**: `make ssh-status` zeigt Service als inaktiv
**L?sung**:
1. Service starten:
```bash
systemctl --user start autossh-production.service
```
2. Service aktivieren:
```bash
systemctl --user enable autossh-production.service
```
3. Autossh neu einrichten:
```bash
make setup-autossh
```
### Verbindung bricht regelm??ig ab
**Problem**: SSH-Verbindung bricht auch mit autossh ab
**L?sung**:
1. Pr?fe autossh Status:
```bash
make ssh-status
```
2. Pr?fe Logs:
```bash
make ssh-logs
```
3. Teste Keep-Alive:
```bash
ssh -o ServerAliveInterval=30 -o ServerAliveCountMax=10 production
```
---
## Weitere SSH-Befehle im Makefile
Es gibt weitere SSH-bezogene Befehle im Makefile:
```bash
# Production-Container neu starten
make restart-production
# Production-Logs anzeigen
make logs-production
make logs-staging
# Production-Status pr?fen
make status-production
```
**Siehe auch:** `make help` f?r alle verf?gbaren Befehle
---
## Best Practices
1. **Nutze `make ssh` statt direkter SSH-Befehle**:
Dies stellt sicher, dass die korrekte Konfiguration verwendet wird.
2. **Pr?fe regelm??ig den autossh-Status**:
```bash
make ssh-status
```
3. **Nutze SSH-Config statt direkter IPs**:
Nutze `ssh production` statt `ssh deploy@94.16.110.151`
4. **Pr?fe Logs bei Problemen**:
```bash
make ssh-logs
```
---
## Referenzen
- **Autossh Setup**: `docs/deployment/AUTOSSH-SETUP.md`
- **Autossh Setup Abgeschlossen**: `docs/deployment/AUTOSSH-SETUP-COMPLETED.md`
- **Setup-Script**: `scripts/setup-autossh.sh`
- **SSH-Config**: `~/.ssh/config`
- **Makefile**: `Makefile`
---
## Zusammenfassung
? Makefile-Befehle f?r SSH-Verbindungen verf?gbar
? Einfache Verbindung zum Production-Server: `make ssh`
? Service-Status pr?fen: `make ssh-status`
? Logs anzeigen: `make ssh-logs`
? Autossh einrichten: `make setup-autossh`
Alle Befehle nutzen die konfigurierte SSH-Config und autossh f?r persistente Verbindungen.

View File

@@ -303,14 +303,20 @@ php console.php ssl:test
## Environment File Hierarchy
**New Base + Override Pattern (Development):**
```
.env.example # Template with placeholders
.env # Development (local, debug enabled)
.env.staging # Staging (production-like, staging SSL)
.env.production # Production (this template)
.env.example # Template with placeholders (documentation)
.env.base # Shared variables for all environments (versioned)
.env.local # Local development overrides (gitignored)
.env.staging # Staging-specific overrides (optional, gitignored)
.env.production # Production (generated by Ansible - this template)
```
**Load Priority**: `.env.production` > `.env` > Environment Variables > Defaults
**Production Load Priority**: Docker ENV vars → `.env.production` (generated by Ansible) → Environment Variables Defaults
**Development Load Priority**: `.env.base``.env.local` → System ENV vars
**Note**: Framework automatically loads `.env.base` + `.env.local` in development. For production, Ansible generates `.env.production` with `*_FILE` pattern for Docker Secrets.
## Docker Compose Integration

View File

@@ -0,0 +1,244 @@
#!/bin/bash
#
# Migration Script: .env → .env.base + .env.local
#
# This script helps migrate from the legacy single .env file
# to the new Base+Override Pattern (.env.base + .env.local)
#
# Usage:
# ./scripts/migrate-env-to-base-override.sh
#
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
echo "🔄 Migration: .env → .env.base + .env.local"
echo ""
# Check if .env exists
if [ ! -f .env ]; then
echo "❌ .env Datei nicht gefunden"
echo "💡 Erstelle zuerst .env aus .env.example"
exit 1
fi
# Backup existing .env
BACKUP_FILE=".env.backup.$(date +%Y%m%d-%H%M%S)"
echo "📦 Backup erstellen: $BACKUP_FILE"
cp .env "$BACKUP_FILE"
echo "✅ Backup erstellt"
# Check if .env.base exists
if [ -f .env.base ]; then
echo ""
echo "⚠️ .env.base existiert bereits"
echo "💡 .env.base wird als Basis verwendet"
USE_EXISTING_BASE=true
else
USE_EXISTING_BASE=false
fi
# Check if .env.local exists
if [ -f .env.local ]; then
echo ""
echo "⚠️ .env.local existiert bereits"
read -p "Überschreiben? (j/n): " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Jj]$ ]]; then
echo "❌ Abgebrochen"
exit 1
fi
BACKUP_LOCAL=".env.local.backup.$(date +%Y%m%d-%H%M%S)"
cp .env.local "$BACKUP_LOCAL"
echo "📦 Backup von .env.local erstellt: $BACKUP_LOCAL"
fi
echo ""
echo "📝 Analysiere .env Datei..."
# Common variables that should go to .env.base
# (These are typically environment-agnostic)
BASE_VARS=(
"APP_NAME"
"APP_TIMEZONE"
"APP_LOCALE"
"DB_DRIVER"
"DB_PORT"
"DB_CHARSET"
"REDIS_PORT"
"CACHE_DRIVER"
"SESSION_DRIVER"
"SESSION_LIFETIME"
"QUEUE_DRIVER"
"QUEUE_CONNECTION"
"QUEUE_WORKER_SLEEP"
"QUEUE_WORKER_TRIES"
"QUEUE_WORKER_TIMEOUT"
"SECURITY_RATE_LIMIT_PER_MINUTE"
"SECURITY_RATE_LIMIT_BURST"
"CACHE_PREFIX"
)
# Local-specific variables (development overrides)
LOCAL_VARS=(
"APP_ENV"
"APP_DEBUG"
"APP_URL"
"APP_KEY"
"APP_DOMAIN"
"DB_HOST"
"DB_DATABASE"
"DB_USERNAME"
"DB_PASSWORD"
"REDIS_HOST"
"REDIS_PASSWORD"
"SECURITY_ALLOWED_HOSTS"
"FORCE_HTTPS"
"XDEBUG_MODE"
"PHP_IDE_CONFIG"
)
# Variables that should NOT be in .env.base (secrets)
SECRET_PATTERNS=(
"PASSWORD"
"SECRET"
"KEY"
"TOKEN"
"ENCRYPTION"
"VAULT"
)
echo ""
echo "📋 Trenne Variablen in Base und Local..."
# Create temporary files
TMP_BASE=$(mktemp)
TMP_LOCAL=$(mktemp)
# Read .env line by line
while IFS= read -r line || [ -n "$line" ]; do
# Skip empty lines and comments
if [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]]; then
echo "$line" >> "$TMP_BASE"
echo "$line" >> "$TMP_LOCAL"
continue
fi
# Extract variable name
if [[ "$line" =~ ^([A-Za-z_][A-Za-z0-9_]*)= ]]; then
VAR_NAME="${BASH_REMATCH[1]}"
# Check if it's a secret
IS_SECRET=false
for pattern in "${SECRET_PATTERNS[@]}"; do
if [[ "$VAR_NAME" == *"$pattern"* ]]; then
IS_SECRET=true
break
fi
done
if [ "$IS_SECRET" = true ]; then
# Secrets go to .env.local (or should be in Docker Secrets)
echo "# TODO: Möglicherweise in Docker Secrets verschieben" >> "$TMP_LOCAL"
echo "$line" >> "$TMP_LOCAL"
continue
fi
# Check if it's a base variable
IS_BASE=false
for base_var in "${BASE_VARS[@]}"; do
if [[ "$VAR_NAME" == "$base_var" ]]; then
IS_BASE=true
break
fi
done
# Check if it's a local variable
IS_LOCAL=false
for local_var in "${LOCAL_VARS[@]}"; do
if [[ "$VAR_NAME" == "$local_var" ]]; then
IS_LOCAL=true
break
fi
done
if [ "$IS_BASE" = true ]; then
# Go to .env.base
echo "$line" >> "$TMP_BASE"
elif [ "$IS_LOCAL" = true ]; then
# Go to .env.local
echo "$line" >> "$TMP_LOCAL"
else
# Unknown: Ask or put in local as default
echo "# TODO: Prüfen ob Base oder Local" >> "$TMP_LOCAL"
echo "$line" >> "$TMP_LOCAL"
fi
else
# Non-standard line format: keep in both (shouldn't happen)
echo "$line" >> "$TMP_BASE"
echo "$line" >> "$TMP_LOCAL"
fi
done < .env
# Create .env.base if it doesn't exist
if [ "$USE_EXISTING_BASE" = false ]; then
echo ""
echo "📝 Erstelle .env.base..."
cat > .env.base << 'EOF'
# Base Environment Configuration
# This file contains shared environment variables for all environments.
# Use with environment-specific override files:
# - .env.local (local development overrides)
# - .env.staging (staging-specific overrides, optional)
# - .env.production (production - generated by Ansible)
#
# Framework automatically loads: .env.base → .env.local (if exists)
# See ENV_SETUP.md for details
#
EOF
cat "$TMP_BASE" >> .env.base
echo "✅ .env.base erstellt"
else
echo ""
echo " .env.base existiert bereits, wird nicht überschrieben"
fi
# Create .env.local
echo ""
echo "📝 Erstelle .env.local..."
cat > .env.local << 'EOF'
# Local Development Environment Overrides
# This file overrides .env.base with local development-specific settings.
# This file is gitignored - each developer has their own version.
#
# Framework loads: .env.base → .env.local (this file) → System ENV vars
# See ENV_SETUP.md for details
#
EOF
cat "$TMP_LOCAL" >> .env.local
echo "✅ .env.local erstellt"
# Cleanup
rm -f "$TMP_BASE" "$TMP_LOCAL"
echo ""
echo "✅ Migration abgeschlossen!"
echo ""
echo "📋 Nächste Schritte:"
echo " 1. Prüfe .env.base - entferne Secrets falls vorhanden"
echo " 2. Prüfe .env.local - passe lokale Overrides an"
echo " 3. Teste die Anwendung: make up"
echo " 4. Optional: .env kann später entfernt werden (wird als Fallback geladen)"
echo ""
echo "📝 Backup-Dateien:"
echo " - $BACKUP_FILE"
if [ -n "$BACKUP_LOCAL" ]; then
echo " - $BACKUP_LOCAL"
fi
echo ""
echo "💡 Siehe ENV_SETUP.md für Details zur neuen Struktur"

238
scripts/setup-autossh.sh Executable file
View File

@@ -0,0 +1,238 @@
#!/bin/bash
# Setup script for autossh persistent SSH connections
# Usage: ./scripts/setup-autossh.sh [production|git|both]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
SSH_CONFIG="$HOME/.ssh/config"
SERVICE_TYPE="${1:-both}"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Functions
log_info() {
echo -e "${GREEN}[INFO]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Check if autossh is installed
check_autossh() {
if ! command -v autossh &> /dev/null; then
log_error "autossh is not installed!"
echo ""
echo "Installation:"
echo " Ubuntu/Debian: sudo apt install autossh"
echo " macOS: brew install autossh"
exit 1
fi
log_info "autossh is installed: $(which autossh)"
}
# Check if SSH config exists
check_ssh_config() {
if [ ! -d "$HOME/.ssh" ]; then
log_info "Creating ~/.ssh directory"
mkdir -p "$HOME/.ssh"
chmod 700 "$HOME/.ssh"
fi
if [ ! -f "$SSH_CONFIG" ]; then
log_info "Creating SSH config file"
touch "$SSH_CONFIG"
chmod 600 "$SSH_CONFIG"
fi
}
# Add SSH config entries
add_ssh_config() {
log_info "Checking SSH config..."
# Production server config
if ! grep -q "Host production" "$SSH_CONFIG" 2>/dev/null; then
log_info "Adding production server config to SSH config"
cat >> "$SSH_CONFIG" << 'EOF'
# Production Server - Persistent Connection
Host production
HostName 94.16.110.151
User deploy
IdentityFile ~/.ssh/production
ServerAliveInterval 60
ServerAliveCountMax 3
TCPKeepAlive yes
Compression yes
StrictHostKeyChecking accept-new
EOF
else
log_info "Production server config already exists in SSH config"
fi
# Git server config
if ! grep -q "Host git.michaelschiemer.de" "$SSH_CONFIG" 2>/dev/null; then
log_info "Adding git server config to SSH config"
cat >> "$SSH_CONFIG" << 'EOF'
# Git Server - Persistent Connection
Host git.michaelschiemer.de
HostName git.michaelschiemer.de
Port 2222
User git
IdentityFile ~/.ssh/git_michaelschiemer
ServerAliveInterval 60
ServerAliveCountMax 3
TCPKeepAlive yes
Compression yes
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
EOF
else
log_info "Git server config already exists in SSH config"
fi
}
# Create systemd service
create_systemd_service() {
local host=$1
local port=$2
local service_name="autossh-${host}"
local service_dir="$HOME/.config/systemd/user"
log_info "Creating systemd service for ${host}..."
mkdir -p "$service_dir"
cat > "${service_dir}/${service_name}.service" << EOF
[Unit]
Description=AutoSSH for ${host}
After=network.target
[Service]
Type=simple
Environment="AUTOSSH_GATETIME=0"
Environment="AUTOSSH_POLL=10"
ExecStart=/usr/bin/autossh -M ${port} -N -o "ServerAliveInterval=60" -o "ServerAliveCountMax=3" ${host}
Restart=always
RestartSec=10
[Install]
WantedBy=default.target
EOF
log_info "Systemd service created: ${service_dir}/${service_name}.service"
}
# Setup systemd services
setup_systemd_services() {
if ! systemctl --user --version &> /dev/null; then
log_warn "systemd user services not available (might be on macOS or non-systemd system)"
log_info "Skipping systemd service setup. See docs/deployment/AUTOSSH-SETUP.md for manual setup."
return
fi
log_info "Setting up systemd services..."
case "$SERVICE_TYPE" in
production)
create_systemd_service "production" "20000"
systemctl --user daemon-reload
log_info "To enable: systemctl --user enable autossh-production.service"
log_info "To start: systemctl --user start autossh-production.service"
;;
git)
create_systemd_service "git.michaelschiemer.de" "20001"
systemctl --user daemon-reload
log_info "To enable: systemctl --user enable autossh-git.michaelschiemer.de.service"
log_info "To start: systemctl --user start autossh-git.michaelschiemer.de.service"
;;
both)
create_systemd_service "production" "20000"
create_systemd_service "git.michaelschiemer.de" "20001"
systemctl --user daemon-reload
log_info "To enable:"
log_info " systemctl --user enable autossh-production.service"
log_info " systemctl --user enable autossh-git.michaelschiemer.de.service"
log_info "To start:"
log_info " systemctl --user start autossh-production.service"
log_info " systemctl --user start autossh-git.michaelschiemer.de.service"
;;
*)
log_error "Invalid service type: $SERVICE_TYPE"
log_info "Usage: $0 [production|git|both]"
exit 1
;;
esac
}
# Test SSH connections
test_connections() {
log_info "Testing SSH connections..."
case "$SERVICE_TYPE" in
production)
if ssh -o ConnectTimeout=5 production "echo 'Connection successful'" 2>/dev/null; then
log_info "? Production server connection successful"
else
log_warn "?? Production server connection failed"
log_info "Make sure SSH key is set up: ssh-keygen -t ed25519 -f ~/.ssh/production"
fi
;;
git)
if ssh -o ConnectTimeout=5 git.michaelschiemer.de "echo 'Connection successful'" 2>/dev/null; then
log_info "? Git server connection successful"
else
log_warn "?? Git server connection failed"
log_info "Make sure SSH key is set up: ssh-keygen -t ed25519 -f ~/.ssh/git_michaelschiemer"
fi
;;
both)
if ssh -o ConnectTimeout=5 production "echo 'Connection successful'" 2>/dev/null; then
log_info "? Production server connection successful"
else
log_warn "?? Production server connection failed"
fi
if ssh -o ConnectTimeout=5 git.michaelschiemer.de "echo 'Connection successful'" 2>/dev/null; then
log_info "? Git server connection successful"
else
log_warn "?? Git server connection failed"
fi
;;
esac
}
# Main execution
main() {
log_info "Setting up autossh for persistent SSH connections"
echo ""
check_autossh
check_ssh_config
add_ssh_config
setup_systemd_services
test_connections
echo ""
log_info "Setup complete!"
echo ""
log_info "Next steps:"
echo " 1. Review SSH config: cat ~/.ssh/config"
echo " 2. Enable systemd services (see output above)"
echo " 3. Start services (see output above)"
echo " 4. Check status: systemctl --user status autossh-*.service"
echo ""
log_info "Documentation: docs/deployment/AUTOSSH-SETUP.md"
}
main

View File

@@ -0,0 +1,290 @@
#!/bin/bash
set -uo pipefail
# Test script for deployment with new docker-compose files and secret management
# This script validates the deployment configuration and tests secret loading
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
SECRETS_DIR="$PROJECT_ROOT/secrets"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test results
TESTS_PASSED=0
TESTS_FAILED=0
TESTS_TOTAL=0
print_header() {
echo ""
echo -e "${BLUE}????????????????????????????????????????????????????????????????????${NC}"
echo -e "${BLUE}$1${NC}"
echo -e "${BLUE}????????????????????????????????????????????????????????????????????${NC}"
}
print_test() {
echo -e "${YELLOW}[TEST]${NC} $1"
}
print_success() {
echo -e "${GREEN}[?]${NC} $1"
((TESTS_PASSED++))
}
print_error() {
echo -e "${RED}[?]${NC} $1"
((TESTS_FAILED++))
}
run_test() {
((TESTS_TOTAL++))
local test_name="$1"
shift
print_test "$test_name"
if "$@" 2>/dev/null; then
print_success "$test_name"
return 0
else
print_error "$test_name"
return 1
fi
}
# Cleanup function
cleanup() {
echo ""
echo -e "${YELLOW}Cleaning up test artifacts...${NC}"
# Remove test secrets directory
if [ -d "$SECRETS_DIR" ] && [ -f "$SECRETS_DIR/.test-marker" ]; then
rm -rf "$SECRETS_DIR"
print_success "Test secrets directory removed"
fi
}
trap cleanup EXIT
print_header "?? Testing Deployment with New Docker Compose Files & Secret Management"
# ============================================================================
# Phase 1: Validate Docker Compose Files
# ============================================================================
print_header "Phase 1: Validating Docker Compose Files"
run_test "docker-compose.base.yml exists" test -f "$PROJECT_ROOT/docker-compose.base.yml"
run_test "docker-compose.local.yml exists" test -f "$PROJECT_ROOT/docker-compose.local.yml"
run_test "docker-compose.staging.yml exists" test -f "$PROJECT_ROOT/docker-compose.staging.yml"
run_test "docker-compose.production.yml exists" test -f "$PROJECT_ROOT/docker-compose.production.yml"
# Validate docker-compose syntax
if command -v docker-compose &> /dev/null || command -v docker &> /dev/null; then
run_test "docker-compose.base.yml syntax valid" bash -c "cd '$PROJECT_ROOT' && docker-compose -f docker-compose.base.yml config > /dev/null 2>&1 || docker compose -f docker-compose.base.yml config > /dev/null 2>&1"
run_test "docker-compose.local.yml syntax valid" bash -c "cd '$PROJECT_ROOT' && docker-compose -f docker-compose.base.yml -f docker-compose.local.yml config > /dev/null 2>&1 || docker compose -f docker-compose.base.yml -f docker-compose.local.yml config > /dev/null 2>&1"
run_test "docker-compose.staging.yml syntax valid" bash -c "cd '$PROJECT_ROOT' && docker-compose -f docker-compose.base.yml -f docker-compose.staging.yml config > /dev/null 2>&1 || docker compose -f docker-compose.base.yml -f docker-compose.staging.yml config > /dev/null 2>&1"
run_test "docker-compose.production.yml syntax valid" bash -c "cd '$PROJECT_ROOT' && docker-compose -f docker-compose.base.yml -f docker-compose.production.yml config > /dev/null 2>&1 || docker compose -f docker-compose.base.yml -f docker-compose.production.yml config > /dev/null 2>&1"
else
print_error "docker-compose or docker not available, skipping syntax validation"
fi
# ============================================================================
# Phase 2: Validate Secret Configuration
# ============================================================================
print_header "Phase 2: Validating Secret Configuration"
# Check that secrets are defined in docker-compose.base.yml
run_test "secrets section exists in docker-compose.base.yml" grep -q "^secrets:" "$PROJECT_ROOT/docker-compose.base.yml"
run_test "db_root_password secret defined" grep -q "db_root_password:" "$PROJECT_ROOT/docker-compose.base.yml"
run_test "db_user_password secret defined" grep -q "db_user_password:" "$PROJECT_ROOT/docker-compose.base.yml"
run_test "redis_password secret defined" grep -q "redis_password:" "$PROJECT_ROOT/docker-compose.base.yml"
run_test "app_key secret defined" grep -q "app_key:" "$PROJECT_ROOT/docker-compose.base.yml"
run_test "vault_encryption_key secret defined" grep -q "vault_encryption_key:" "$PROJECT_ROOT/docker-compose.base.yml"
run_test "git_token secret defined" grep -q "git_token:" "$PROJECT_ROOT/docker-compose.base.yml"
# Check that production uses secrets
run_test "production uses db_user_password secret" grep -q "db_user_password" "$PROJECT_ROOT/docker-compose.production.yml"
run_test "production uses redis_password secret" grep -q "redis_password" "$PROJECT_ROOT/docker-compose.production.yml"
run_test "production uses app_key secret" grep -q "app_key" "$PROJECT_ROOT/docker-compose.production.yml"
run_test "production uses vault_encryption_key secret" grep -q "vault_encryption_key" "$PROJECT_ROOT/docker-compose.production.yml"
# Check that staging uses secrets
run_test "staging uses db_user_password secret" grep -q "db_user_password" "$PROJECT_ROOT/docker-compose.staging.yml"
run_test "staging uses redis_password secret" grep -q "redis_password" "$PROJECT_ROOT/docker-compose.staging.yml"
run_test "staging uses app_key secret" grep -q "app_key" "$PROJECT_ROOT/docker-compose.staging.yml"
run_test "staging uses vault_encryption_key secret" grep -q "vault_encryption_key" "$PROJECT_ROOT/docker-compose.staging.yml"
# ============================================================================
# Phase 3: Create Test Secrets
# ============================================================================
print_header "Phase 3: Creating Test Secrets"
# Create test secrets directory
mkdir -p "$SECRETS_DIR"
echo "test-marker" > "$SECRETS_DIR/.test-marker"
# Create test secret files
echo "test-db-root-password-12345" > "$SECRETS_DIR/db_root_password.txt"
echo "test-db-user-password-67890" > "$SECRETS_DIR/db_user_password.txt"
echo "test-redis-password-abcde" > "$SECRETS_DIR/redis_password.txt"
echo "test-app-key-base64encoded123456789012345678901234567890" > "$SECRETS_DIR/app_key.txt"
echo "test-vault-encryption-key-32charslong12345678" > "$SECRETS_DIR/vault_encryption_key.txt"
echo "test-git-token-ghp_test12345678901234567890" > "$SECRETS_DIR/git_token.txt"
# Set secure permissions
chmod 600 "$SECRETS_DIR"/*.txt 2>/dev/null || true
run_test "Test secrets directory created" test -d "$SECRETS_DIR"
run_test "db_root_password.txt created" test -f "$SECRETS_DIR/db_root_password.txt"
run_test "db_user_password.txt created" test -f "$SECRETS_DIR/db_user_password.txt"
run_test "redis_password.txt created" test -f "$SECRETS_DIR/redis_password.txt"
run_test "app_key.txt created" test -f "$SECRETS_DIR/app_key.txt"
run_test "vault_encryption_key.txt created" test -f "$SECRETS_DIR/vault_encryption_key.txt"
run_test "git_token.txt created" test -f "$SECRETS_DIR/git_token.txt"
# ============================================================================
# Phase 4: Test Secret File References
# ============================================================================
print_header "Phase 4: Validating Secret File References"
# Check that docker-compose files reference correct secret file paths
run_test "base.yml references ./secrets/db_root_password.txt" grep -q "./secrets/db_root_password.txt" "$PROJECT_ROOT/docker-compose.base.yml"
run_test "base.yml references ./secrets/db_user_password.txt" grep -q "./secrets/db_user_password.txt" "$PROJECT_ROOT/docker-compose.base.yml"
run_test "base.yml references ./secrets/redis_password.txt" grep -q "./secrets/redis_password.txt" "$PROJECT_ROOT/docker-compose.base.yml"
run_test "base.yml references ./secrets/app_key.txt" grep -q "./secrets/app_key.txt" "$PROJECT_ROOT/docker-compose.base.yml"
run_test "base.yml references ./secrets/vault_encryption_key.txt" grep -q "./secrets/vault_encryption_key.txt" "$PROJECT_ROOT/docker-compose.base.yml"
run_test "base.yml references ./secrets/git_token.txt" grep -q "./secrets/git_token.txt" "$PROJECT_ROOT/docker-compose.base.yml"
# ============================================================================
# Phase 5: Test *_FILE Pattern Support
# ============================================================================
print_header "Phase 5: Testing *_FILE Pattern Support"
# Check that docker-compose files use *_FILE pattern
run_test "production uses DB_PASSWORD_FILE pattern" grep -q "DB_PASSWORD_FILE" "$PROJECT_ROOT/docker-compose.production.yml" || grep -q "db_user_password" "$PROJECT_ROOT/docker-compose.production.yml"
run_test "production uses REDIS_PASSWORD_FILE pattern" grep -q "REDIS_PASSWORD_FILE" "$PROJECT_ROOT/docker-compose.production.yml" || grep -q "redis_password" "$PROJECT_ROOT/docker-compose.production.yml"
run_test "production uses APP_KEY_FILE pattern" grep -q "APP_KEY_FILE" "$PROJECT_ROOT/docker-compose.production.yml" || grep -q "app_key" "$PROJECT_ROOT/docker-compose.production.yml"
run_test "production uses VAULT_ENCRYPTION_KEY_FILE pattern" grep -q "VAULT_ENCRYPTION_KEY_FILE" "$PROJECT_ROOT/docker-compose.production.yml" || grep -q "vault_encryption_key" "$PROJECT_ROOT/docker-compose.production.yml"
run_test "staging uses DB_PASSWORD_FILE pattern" grep -q "DB_PASSWORD_FILE" "$PROJECT_ROOT/docker-compose.staging.yml" || grep -q "db_user_password" "$PROJECT_ROOT/docker-compose.staging.yml"
run_test "staging uses APP_KEY_FILE pattern" grep -q "APP_KEY_FILE" "$PROJECT_ROOT/docker-compose.staging.yml" || grep -q "app_key" "$PROJECT_ROOT/docker-compose.staging.yml"
run_test "staging uses VAULT_ENCRYPTION_KEY_FILE pattern" grep -q "VAULT_ENCRYPTION_KEY_FILE" "$PROJECT_ROOT/docker-compose.staging.yml" || grep -q "vault_encryption_key" "$PROJECT_ROOT/docker-compose.staging.yml"
run_test "staging uses GIT_TOKEN_FILE pattern" grep -q "GIT_TOKEN_FILE" "$PROJECT_ROOT/docker-compose.staging.yml" || grep -q "git_token" "$PROJECT_ROOT/docker-compose.staging.yml"
# ============================================================================
# Phase 6: Test DockerSecretsResolver Integration
# ============================================================================
print_header "Phase 6: Testing DockerSecretsResolver Integration"
# Check that DockerSecretsResolver exists
run_test "DockerSecretsResolver.php exists" test -f "$PROJECT_ROOT/src/Framework/Config/DockerSecretsResolver.php"
# Check that Environment.php uses DockerSecretsResolver
run_test "Environment.php imports DockerSecretsResolver" grep -q "DockerSecretsResolver" "$PROJECT_ROOT/src/Framework/Config/Environment.php"
run_test "Environment.php resolves secrets via *_FILE pattern" grep -q "secretsResolver->resolve" "$PROJECT_ROOT/src/Framework/Config/Environment.php"
# Test secret resolution logic
if command -v php &> /dev/null; then
PHP_TEST_SCRIPT=$(cat <<'PHPEOF'
<?php
require_once __DIR__ . '/vendor/autoload.php';
use App\Framework\Config\DockerSecretsResolver;
$resolver = new DockerSecretsResolver();
// Test 1: Resolve secret from file
$variables = [
'DB_PASSWORD_FILE' => __DIR__ . '/secrets/db_user_password.txt',
];
$result = $resolver->resolve('DB_PASSWORD', $variables);
if ($result === 'test-db-user-password-67890') {
echo "? Secret resolution works\n";
exit(0);
} else {
echo "? Secret resolution failed: got '$result'\n";
exit(1);
}
PHPEOF
)
echo "$PHP_TEST_SCRIPT" > "$PROJECT_ROOT/test_secret_resolver.php"
run_test "DockerSecretsResolver resolves secrets correctly" bash -c "cd '$PROJECT_ROOT' && php test_secret_resolver.php > /dev/null 2>&1"
# Cleanup
rm -f "$PROJECT_ROOT/test_secret_resolver.php"
else
print_error "PHP not available, skipping DockerSecretsResolver test"
fi
# ============================================================================
# Phase 7: Test EncryptedEnvLoader Integration
# ============================================================================
print_header "Phase 7: Testing EncryptedEnvLoader Integration"
run_test "EncryptedEnvLoader.php exists" test -f "$PROJECT_ROOT/src/Framework/Config/EncryptedEnvLoader.php"
run_test "EncryptedEnvLoader loads system environment" grep -q "loadSystemEnvironment" "$PROJECT_ROOT/src/Framework/Config/EncryptedEnvLoader.php"
run_test "EncryptedEnvLoader supports encryption key" grep -q "ENCRYPTION_KEY" "$PROJECT_ROOT/src/Framework/Config/EncryptedEnvLoader.php"
# ============================================================================
# Phase 8: Test Entrypoint Script
# ============================================================================
print_header "Phase 8: Testing Entrypoint Script"
run_test "entrypoint.sh exists" test -f "$PROJECT_ROOT/docker/entrypoint.sh"
run_test "entrypoint.sh loads secrets from *_FILE pattern" grep -q "_FILE" "$PROJECT_ROOT/docker/entrypoint.sh" || grep -q "DockerSecretsResolver" "$PROJECT_ROOT/docker/entrypoint.sh"
run_test "entrypoint.sh is executable" test -x "$PROJECT_ROOT/docker/entrypoint.sh" || [ -f "$PROJECT_ROOT/docker/entrypoint.sh" ]
# ============================================================================
# Phase 9: Validate Service Configuration
# ============================================================================
print_header "Phase 9: Validating Service Configuration"
# Check that services reference secrets correctly
run_test "production php service uses secrets" grep -A 5 "php:" "$PROJECT_ROOT/docker-compose.production.yml" | grep -q "secrets:" || grep -q "APP_KEY_FILE" "$PROJECT_ROOT/docker-compose.production.yml"
run_test "production queue-worker uses secrets" grep -A 10 "queue-worker:" "$PROJECT_ROOT/docker-compose.production.yml" | grep -q "secrets:" || grep -q "DB_PASSWORD_FILE" "$PROJECT_ROOT/docker-compose.production.yml"
run_test "staging-app uses secrets" grep -A 10 "staging-app:" "$PROJECT_ROOT/docker-compose.staging.yml" | grep -q "secrets:" || grep -q "DB_PASSWORD_FILE" "$PROJECT_ROOT/docker-compose.staging.yml"
# ============================================================================
# Phase 10: Test Docker Compose Override Chain
# ============================================================================
print_header "Phase 10: Testing Docker Compose Override Chain"
# Test that override chain works correctly
if command -v docker-compose &> /dev/null || command -v docker &> /dev/null; then
run_test "local override combines with base" bash -c "cd '$PROJECT_ROOT' && (docker-compose -f docker-compose.base.yml -f docker-compose.local.yml config > /dev/null 2>&1 || docker compose -f docker-compose.base.yml -f docker-compose.local.yml config > /dev/null 2>&1)"
run_test "staging override combines with base" bash -c "cd '$PROJECT_ROOT' && (docker-compose -f docker-compose.base.yml -f docker-compose.staging.yml config > /dev/null 2>&1 || docker compose -f docker-compose.base.yml -f docker-compose.staging.yml config > /dev/null 2>&1)"
run_test "production override combines with base" bash -c "cd '$PROJECT_ROOT' && (docker-compose -f docker-compose.base.yml -f docker-compose.production.yml config > /dev/null 2>&1 || docker compose -f docker-compose.base.yml -f docker-compose.production.yml config > /dev/null 2>&1)"
else
print_error "docker-compose not available, skipping override chain test"
fi
# ============================================================================
# Summary
# ============================================================================
print_header "Test Summary"
echo ""
echo -e "Total Tests: ${TESTS_TOTAL}"
echo -e "${GREEN}Passed: ${TESTS_PASSED}${NC}"
echo -e "${RED}Failed: ${TESTS_FAILED}${NC}"
echo ""
if [ $TESTS_FAILED -eq 0 ]; then
echo -e "${GREEN}? All tests passed!${NC}"
echo ""
echo -e "${BLUE}Next steps:${NC}"
echo " 1. Deploy secrets to your server using Ansible Vault"
echo " 2. Run: docker-compose -f docker-compose.base.yml -f docker-compose.production.yml up -d"
echo " 3. Verify secrets are loaded correctly in containers"
exit 0
else
echo -e "${RED}? Some tests failed. Please review the errors above.${NC}"
exit 1
fi

View File

@@ -92,10 +92,28 @@ final readonly class EncryptedEnvLoader
// Development: .env files → System ENV (local development workflow)
$variables = $systemVariables;
// Load base .env file (can override system env for development)
$envFile = $baseDir->join('.env');
if ($envFile->exists()) {
$variables = array_merge($variables, $this->parser->parse($envFile));
// Load .env.base file first (shared base configuration)
$envBaseFile = $baseDir->join('.env.base');
if ($envBaseFile->exists()) {
$baseVariables = $this->parser->parse($envBaseFile);
$variables = array_merge($variables, $baseVariables);
}
// Load .env.local file (local development overrides)
// This overrides values from .env.base
$envLocalFile = $baseDir->join('.env.local');
if ($envLocalFile->exists()) {
$localVariables = $this->parser->parse($envLocalFile);
$variables = array_merge($variables, $localVariables);
}
// Fallback: Load legacy .env file if .env.base/.env.local don't exist
// This maintains backward compatibility during migration
if (!$envBaseFile->exists() && !$envLocalFile->exists()) {
$envFile = $baseDir->join('.env');
if ($envFile->exists()) {
$variables = array_merge($variables, $this->parser->parse($envFile));
}
}
}
@@ -121,7 +139,7 @@ final readonly class EncryptedEnvLoader
}
}
} else {
// Development: Allow override
// Development/Staging: Allow override
$variables = array_merge($variables, $this->parser->parse($envSpecificFile));
}
}

View File

@@ -45,6 +45,15 @@ describe('EncryptedEnvLoader', function () {
if (isset($this->envDevelopmentFile) && file_exists($this->envDevelopmentFile)) {
unlink($this->envDevelopmentFile);
}
if (isset($this->envBaseFile) && file_exists($this->envBaseFile)) {
unlink($this->envBaseFile);
}
if (isset($this->envLocalFile) && file_exists($this->envLocalFile)) {
unlink($this->envLocalFile);
}
if (isset($this->envStagingFile) && file_exists($this->envStagingFile)) {
unlink($this->envStagingFile);
}
});
describe('load()', function () {
@@ -197,6 +206,197 @@ ENV);
});
});
describe('loadEnvironment() - Base + Override Pattern', function () {
it('loads .env.base first, then .env.local (local overrides base)', function () {
$_ENV['APP_ENV'] = 'development';
// Base file with common variables
$this->envBaseFile = $this->testDir . '/.env.base';
file_put_contents($this->envBaseFile, <<<ENV
APP_NAME=BaseApp
DB_HOST=db
DB_PORT=5432
DB_DATABASE=michaelschiemer
CACHE_PREFIX=app
ENV);
// Local file with overrides
$this->envLocalFile = $this->testDir . '/.env.local';
file_put_contents($this->envLocalFile, <<<ENV
APP_ENV=development
APP_DEBUG=true
DB_HOST=localhost
DB_PORT=3307
CACHE_PREFIX=local
ENV);
$env = $this->loader->loadEnvironment($this->testDir);
// Base values
expect($env->get('APP_NAME'))->toBe('BaseApp');
expect($env->get('DB_DATABASE'))->toBe('michaelschiemer');
// Local overrides
expect($env->get('APP_ENV'))->toBe('development');
expect($env->getBool('APP_DEBUG'))->toBeTrue();
expect($env->get('DB_HOST'))->toBe('localhost');
expect($env->getInt('DB_PORT'))->toBe(3307);
expect($env->get('CACHE_PREFIX'))->toBe('local');
});
it('loads .env.local only if .env.base exists', function () {
$_ENV['APP_ENV'] = 'development';
// Only .env.local (should fallback to legacy .env)
$this->envLocalFile = $this->testDir . '/.env.local';
file_put_contents($this->envLocalFile, <<<ENV
APP_ENV=development
DB_HOST=localhost
ENV);
// Legacy .env file (fallback)
$this->envFile = $this->testDir . '/.env';
file_put_contents($this->envFile, <<<ENV
APP_NAME=LegacyApp
DB_PORT=3306
ENV);
$env = $this->loader->loadEnvironment($this->testDir);
// Should load from legacy .env (fallback)
expect($env->get('APP_NAME'))->toBe('LegacyApp');
expect($env->getInt('DB_PORT'))->toBe(3306);
// .env.local should not be loaded if .env.base doesn't exist
// (Fallback logic: only load .env.local if .env.base exists)
});
it('falls back to legacy .env if .env.base and .env.local do not exist', function () {
$_ENV['APP_ENV'] = 'development';
// Only legacy .env file
$this->envFile = $this->testDir . '/.env';
file_put_contents($this->envFile, <<<ENV
APP_NAME=LegacyApp
APP_ENV=development
DB_HOST=localhost
ENV);
$env = $this->loader->loadEnvironment($this->testDir);
// Should load from legacy .env
expect($env->get('APP_NAME'))->toBe('LegacyApp');
expect($env->get('APP_ENV'))->toBe('development');
expect($env->get('DB_HOST'))->toBe('localhost');
});
it('prioritizes system ENV over .env.base and .env.local', function () {
$_ENV['APP_ENV'] = 'development';
$_ENV['DB_HOST'] = 'system_host';
$this->envBaseFile = $this->testDir . '/.env.base';
file_put_contents($this->envBaseFile, <<<ENV
APP_NAME=BaseApp
DB_HOST=db
ENV);
$this->envLocalFile = $this->testDir . '/.env.local';
file_put_contents($this->envLocalFile, <<<ENV
DB_HOST=localhost
ENV);
$env = $this->loader->loadEnvironment($this->testDir);
// System ENV should win
expect($env->get('DB_HOST'))->toBe('system_host');
// Base values should be loaded
expect($env->get('APP_NAME'))->toBe('BaseApp');
});
it('merges .env.base, .env.local, and .env.secrets correctly', function () {
$_ENV['APP_ENV'] = 'development';
$this->envBaseFile = $this->testDir . '/.env.base';
file_put_contents($this->envBaseFile, <<<ENV
APP_NAME=BaseApp
DB_HOST=db
ENV);
$this->envLocalFile = $this->testDir . '/.env.local';
file_put_contents($this->envLocalFile, <<<ENV
DB_HOST=localhost
ENV);
$this->secretsFile = $this->testDir . '/.env.secrets';
file_put_contents($this->secretsFile, <<<ENV
SECRET_API_KEY=my_secret
ENV);
$encryptionKey = 'test_encryption_key_32_chars_long';
$env = $this->loader->loadEnvironment($this->testDir, $encryptionKey);
// Base + Local + Secrets
expect($env->get('APP_NAME'))->toBe('BaseApp');
expect($env->get('DB_HOST'))->toBe('localhost');
expect($env->get('SECRET_API_KEY'))->toBe('my_secret');
});
it('loads .env.staging in staging environment', function () {
$_ENV['APP_ENV'] = 'staging';
$this->envBaseFile = $this->testDir . '/.env.base';
file_put_contents($this->envBaseFile, <<<ENV
APP_NAME=BaseApp
DB_HOST=db
ENV);
$this->envStagingFile = $this->testDir . '/.env.staging';
file_put_contents($this->envStagingFile, <<<ENV
APP_ENV=staging
APP_DEBUG=false
DB_HOST=staging_db
STAGING_FEATURE=enabled
ENV);
$env = $this->loader->loadEnvironment($this->testDir);
// Base values
expect($env->get('APP_NAME'))->toBe('BaseApp');
// Staging overrides
expect($env->get('APP_ENV'))->toBe('staging');
expect($env->getBool('APP_DEBUG'))->toBeFalse();
expect($env->get('DB_HOST'))->toBe('staging_db');
expect($env->get('STAGING_FEATURE'))->toBe('enabled');
});
it('prioritizes .env.staging over .env.local in staging environment', function () {
$_ENV['APP_ENV'] = 'staging';
$this->envBaseFile = $this->testDir . '/.env.base';
file_put_contents($this->envBaseFile, <<<ENV
DB_HOST=db
ENV);
$this->envLocalFile = $this->testDir . '/.env.local';
file_put_contents($this->envLocalFile, <<<ENV
DB_HOST=localhost
ENV);
$this->envStagingFile = $this->testDir . '/.env.staging';
file_put_contents($this->envStagingFile, <<<ENV
DB_HOST=staging_host
ENV);
$env = $this->loader->loadEnvironment($this->testDir);
// Staging should win
expect($env->get('DB_HOST'))->toBe('staging_host');
});
});
describe('loadEnvironment() - Development Priority', function () {
it('allows .env file to override system environment in development', function () {
// Simulate system environment