- Create AnsibleDeployStage using framework's Process module for secure command execution - Integrate AnsibleDeployStage into DeploymentPipelineCommands for production deployments - Add force_deploy flag support in Ansible playbook to override stale locks - Use PHP deployment module as orchestrator (php console.php deploy:production) - Fix ErrorAggregationInitializer to use Environment class instead of $_ENV superglobal Architecture: - BuildStage → AnsibleDeployStage → HealthCheckStage for production - Process module provides timeout, error handling, and output capture - Ansible playbook supports rollback via rollback-git-based.yml - Zero-downtime deployments with health checks
193 lines
5.0 KiB
PHP
193 lines
5.0 KiB
PHP
<?php
|
|
|
|
declare(strict_types=1);
|
|
|
|
require_once __DIR__ . '/../vendor/autoload.php';
|
|
|
|
use App\Framework\Worker\Every;
|
|
use App\Framework\Worker\Schedule;
|
|
|
|
/**
 * Example: Scheduled Job that runs every 5 minutes
 *
 * The #[Schedule] attribute marks this class for automatic registration
 * with the Worker's scheduler system.
 *
 * The Worker will:
 * 1. Discover this class via ScheduleDiscoveryService on startup
 * 2. Register it with SchedulerService using an IntervalSchedule
 * 3. Execute the handle() method every 5 minutes
 */
#[Schedule(at: new Every(minutes: 5))]
final class CleanupTempFilesJob
{
    /** Files older than this many seconds are eligible for deletion. */
    private const MAX_AGE_SECONDS = 3600;

    /**
     * Entry point called by the scheduler when the job is due.
     *
     * @return array{status: string, deleted_files: int, executed_at: int}
     */
    public function handle(): array
    {
        echo "[" . date('Y-m-d H:i:s') . "] Running CleanupTempFilesJob\n";

        // Your cleanup logic here
        $deletedFiles = $this->cleanupOldTempFiles();

        return [
            'status' => 'success',
            'deleted_files' => $deletedFiles,
            'executed_at' => time(),
        ];
    }

    /**
     * Delete *.tmp files in the system temp directory older than one hour.
     *
     * @return int Number of files actually removed.
     */
    private function cleanupOldTempFiles(): int
    {
        $tempDir = sys_get_temp_dir();
        $deletedCount = 0;

        // glob() returns false on error (not an empty array); guard against
        // it so the foreach below never iterates over false.
        $files = glob($tempDir . '/*.tmp') ?: [];

        foreach ($files as $file) {
            // filemtime() can fail if the file vanished between glob() and
            // here (concurrent cleanup); treat that as "skip".
            $modifiedAt = is_file($file) ? filemtime($file) : false;
            if ($modifiedAt === false || (time() - $modifiedAt) <= self::MAX_AGE_SECONDS) {
                continue;
            }

            // Only count files we actually removed; unlink() can fail on
            // permissions or a concurrent delete.
            if (unlink($file)) {
                $deletedCount++;
            }
        }

        return $deletedCount;
    }
}
|
|
|
|
/**
 * Example: Hourly data aggregation job
 *
 * This job runs every hour and aggregates analytics data
 */
#[Schedule(at: new Every(hours: 1))]
final class AggregateAnalyticsJob
{
    /**
     * Scheduler entry point.
     *
     * @return array{status: string, records_processed: int, executed_at: int}
     */
    public function handle(): array
    {
        $startedAt = date('Y-m-d H:i:s');
        echo "[{$startedAt}] Running AggregateAnalyticsJob\n";

        // Your aggregation logic here
        return [
            'status' => 'success',
            'records_processed' => $this->aggregateLastHourData(),
            'executed_at' => time(),
        ];
    }

    /**
     * Placeholder aggregation: a real implementation would query the
     * datastore for the previous hour's events.
     */
    private function aggregateLastHourData(): int
    {
        // Example aggregation logic
        return rand(100, 1000);
    }
}
|
|
|
|
/**
 * Example: Daily backup job
 *
 * This job runs once per day
 */
#[Schedule(at: new Every(days: 1))]
final class DailyBackupJob
{
    /**
     * Scheduler entry point.
     *
     * @return array{status: string, backup_size_mb: float, executed_at: int}
     */
    public function handle(): array
    {
        printf("[%s] Running DailyBackupJob\n", date('Y-m-d H:i:s'));

        // Your backup logic here
        $backupSize = $this->createDatabaseBackup();

        return [
            'status' => 'success',
            'backup_size_mb' => $backupSize,
            'executed_at' => time(),
        ];
    }

    /**
     * Placeholder backup: returns a fake archive size in megabytes.
     */
    private function createDatabaseBackup(): float
    {
        // Example backup logic — random size between 5.0 and 20.0 MB.
        $tenthsOfMb = rand(50, 200);

        return round($tenthsOfMb / 10, 2);
    }
}
|
|
|
|
/**
 * Example: Callable job (using __invoke)
 *
 * Jobs can also be callable instead of using handle() method
 */
#[Schedule(at: new Every(minutes: 10))]
final class MonitorSystemHealthJob
{
    /**
     * Report current memory usage and 1-minute CPU load average.
     *
     * @return string Human-readable health summary.
     */
    public function __invoke(): string
    {
        echo "[" . date('Y-m-d H:i:s') . "] Running MonitorSystemHealthJob\n";

        $memoryUsage = memory_get_usage(true) / 1024 / 1024;

        // sys_getloadavg() returns false on platforms without load averages
        // (e.g. Windows); fall back to 0.0 instead of indexing into false.
        $loadAverages = sys_getloadavg();
        $cpuLoad = $loadAverages !== false ? $loadAverages[0] : 0.0;

        return "System healthy - Memory: {$memoryUsage}MB, CPU Load: {$cpuLoad}";
    }
}
|
|
|
|
/**
 * Example: Complex schedule with multiple time units
 */
#[Schedule(at: new Every(days: 1, hours: 2, minutes: 30))]
final class WeeklyReportJob
{
    /**
     * Scheduler entry point.
     *
     * @return array{status: string, report_generated: bool, executed_at: int}
     */
    public function handle(): array
    {
        echo '[' . date('Y-m-d H:i:s') . "] Running WeeklyReportJob\n";

        // Interval: 1 day + 2 hours + 30 minutes
        // = (1 * 86400) + (2 * 3600) + (30 * 60) = 94200 seconds.
        return [
            'status' => 'success',
            'report_generated' => true,
            'executed_at' => time(),
        ];
    }
}
|
|
|
|
// Print usage notes for this example file. Nowdoc (<<<'INFO') is used
// deliberately: no interpolation, so the text is emitted verbatim.
echo <<<'INFO'
=== Scheduled Jobs Example ===

This example shows how to create scheduled jobs using the #[Schedule] attribute.

How it works:
1. Mark your job class with #[Schedule(at: new Every(...))]
2. Implement either a handle() method or make your class callable (__invoke)
3. The Worker will automatically discover and register your job on startup
4. The job will execute at the specified interval

Available Every time units:
- Every(days: 1) - Run once per day
- Every(hours: 1) - Run once per hour
- Every(minutes: 5) - Run every 5 minutes
- Every(seconds: 30) - Run every 30 seconds
- Combine multiple units: Every(days: 1, hours: 2, minutes: 30)

Task ID Generation:
Job class names are automatically converted to kebab-case task IDs:
- CleanupTempFilesJob -> cleanup-temp-files-job
- AggregateAnalyticsJob -> aggregate-analytics-job
- DailyBackupJob -> daily-backup-job

Starting the Worker:
To run these scheduled jobs, start the Worker:
docker exec php php console.php worker:start

The Worker will:
- Discover all classes with #[Schedule] attribute
- Register them with the SchedulerService
- Check for due tasks every 10 seconds
- Execute tasks and log results

INFO;
|