michaelschiemer/tests/Performance/MachineLearning/MLManagementPerformanceTest.php
<?php
declare(strict_types=1);
/**
* ML Management System Performance Tests
*
* Benchmarks for Database-backed ML Management components:
* - DatabaseModelRegistry performance
* - DatabasePerformanceStorage throughput
* - Model lookup latency
* - Bulk operations efficiency
*
* Performance Baselines (Target):
* - Model registration: <10ms
* - Model lookup: <5ms
* - Prediction storage: <15ms
* - Bulk prediction insert (100): <500ms
* - Accuracy calculation (1000 records): <100ms
*/
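// How to run (assumed invocation; the require below resolves relative to this
// file, so any working directory works):
//   php tests/Performance/MachineLearning/MLManagementPerformanceTest.php
// Run against a disposable database: the script DELETEs every row whose
// model_name matches 'perf-test-%' before and after the benchmarks.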
require __DIR__ . '/../../../vendor/autoload.php';
use App\Framework\Core\ContainerBootstrapper;
use App\Framework\DI\DefaultContainer;
use App\Framework\Performance\EnhancedPerformanceCollector;
use App\Framework\Config\Environment;
use App\Framework\Context\ExecutionContext;
use App\Framework\Database\ValueObjects\SqlQuery;
use App\Framework\Database\ConnectionInterface;
use App\Framework\MachineLearning\ModelManagement\DatabaseModelRegistry;
use App\Framework\MachineLearning\ModelManagement\DatabasePerformanceStorage;
use App\Framework\MachineLearning\ModelManagement\ValueObjects\ModelMetadata;
use App\Framework\MachineLearning\ModelManagement\ValueObjects\ModelType;
use App\Framework\Core\ValueObjects\Version;
use App\Framework\Core\ValueObjects\Timestamp;
use App\Framework\Core\ValueObjects\Duration;
// Bootstrap container
$performanceCollector = new EnhancedPerformanceCollector(
    new \App\Framework\DateTime\SystemClock(),
    new \App\Framework\DateTime\SystemHighResolutionClock(),
    new \App\Framework\Performance\MemoryMonitor()
);
$container = new DefaultContainer();
$env = Environment::fromFile(__DIR__ . '/../../../.env');
$container->instance(Environment::class, $env);
$executionContext = ExecutionContext::forTest();
$container->instance(ExecutionContext::class, $executionContext);
$bootstrapper = new ContainerBootstrapper($container);
// NOTE: the bootstrap root is hardcoded to the container path; adjust it when
// running outside Docker (the autoload/.env paths above are file-relative).
$container = $bootstrapper->bootstrap('/var/www/html', $performanceCollector);
// Shim for framework code that expects a global container() helper
if (!function_exists('container')) {
    function container() {
        global $container;
        return $container;
    }
}
// Color output helpers
function green(string $text): string {
    return "\033[32m{$text}\033[0m";
}
function red(string $text): string {
    return "\033[31m{$text}\033[0m";
}
function yellow(string $text): string {
    return "\033[33m{$text}\033[0m";
}
function blue(string $text): string {
    return "\033[34m{$text}\033[0m";
}
function cyan(string $text): string {
    return "\033[36m{$text}\033[0m";
}
// Performance tracking
$benchmarks = [];
function benchmark(string $name, callable $fn, int $iterations = 1): array
{
    global $benchmarks;
    $times = [];
    $memoryBefore = memory_get_usage(true);
    for ($i = 0; $i < $iterations; $i++) {
        $start = microtime(true);
        $fn();
        $end = microtime(true);
        $times[] = ($end - $start) * 1000; // Convert to milliseconds
    }
    $memoryAfter = memory_get_usage(true);
    $memoryUsed = ($memoryAfter - $memoryBefore) / 1024 / 1024; // MB
    $avgTime = array_sum($times) / count($times);
    $minTime = min($times);
    $maxTime = max($times);
    $result = [
        'name' => $name,
        'iterations' => $iterations,
        'avg_time_ms' => round($avgTime, 2),
        'min_time_ms' => round($minTime, 2),
        'max_time_ms' => round($maxTime, 2),
        'memory_mb' => round($memoryUsed, 2),
        'throughput' => $iterations > 1 ? round(1000 / $avgTime, 2) : null,
    ];
    $benchmarks[] = $result;
    return $result;
}
function printBenchmark(array $result, ?float $baselineMs = null): void
{
    $name = str_pad($result['name'], 50, '.');
    $avgTime = str_pad($result['avg_time_ms'] . 'ms', 10, ' ', STR_PAD_LEFT);
    // Color based on baseline ($color holds a helper name, invoked as a PHP variable function)
    if ($baselineMs !== null) {
        $color = $result['avg_time_ms'] <= $baselineMs ? 'green' : 'red';
        $status = $result['avg_time_ms'] <= $baselineMs ? '✓' : '✗';
        echo $color("$status ") . "$name " . $color($avgTime);
    } else {
        echo cyan("  ") . "$name " . cyan($avgTime);
    }
    if ($result['throughput']) {
        echo yellow(" ({$result['throughput']} ops/sec)");
    }
    echo "\n";
}
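// Illustrative use of the two helpers above (a sketch, not part of the suite;
// the workload and the 50ms baseline are made up for the example):
//   $r = benchmark('array_sum (10k ints)', fn() => array_sum(range(1, 10000)), 20);
//   printBenchmark($r, 50.0); // green tick if avg <= 50ms, red cross otherwise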
echo blue("╔════════════════════════════════════════════════════════════╗\n");
echo blue("║ ML Management System Performance Benchmarks ║\n");
echo blue("╚════════════════════════════════════════════════════════════╝\n\n");
// Get services
$connection = $container->get(ConnectionInterface::class);
$registry = $container->get(DatabaseModelRegistry::class);
$storage = $container->get(DatabasePerformanceStorage::class);
// Clean up test data
echo yellow("Preparing test environment...\n");
$connection->execute(SqlQuery::create('DELETE FROM ml_models WHERE model_name LIKE ?', ['perf-test-%']));
$connection->execute(SqlQuery::create('DELETE FROM ml_predictions WHERE model_name LIKE ?', ['perf-test-%']));
$connection->execute(SqlQuery::create('DELETE FROM ml_confidence_baselines WHERE model_name LIKE ?', ['perf-test-%']));
echo "\n" . blue("═══ DatabaseModelRegistry Benchmarks ═══\n\n");
// Benchmark 1: Single Model Registration
$result = benchmark('Model Registration (single)', function() use ($registry) {
    static $counter = 0;
    $counter++;
    $metadata = new ModelMetadata(
        modelName: "perf-test-model-{$counter}",
        modelType: ModelType::SUPERVISED,
        version: new Version(1, 0, 0),
        configuration: ['layers' => 3, 'neurons' => 128],
        performanceMetrics: ['accuracy' => 0.95],
        createdAt: Timestamp::now(),
        deployedAt: Timestamp::now(),
        environment: 'production'
    );
    $registry->register($metadata);
}, 100);
printBenchmark($result, 10.0); // Baseline: <10ms
// Benchmark 2: Model Lookup by Name and Version
$testModel = new ModelMetadata(
    modelName: 'perf-test-lookup',
    modelType: ModelType::SUPERVISED,
    version: new Version(1, 0, 0),
    configuration: [],
    performanceMetrics: [],
    createdAt: Timestamp::now(),
    deployedAt: Timestamp::now(),
    environment: 'production'
);
$registry->register($testModel);
$result = benchmark('Model Lookup (by name + version)', function() use ($registry) {
    $registry->get('perf-test-lookup', new Version(1, 0, 0));
}, 500);
printBenchmark($result, 5.0); // Baseline: <5ms
// Benchmark 3: Get Latest Model
$result = benchmark('Model Lookup (latest)', function() use ($registry) {
    $registry->getLatest('perf-test-lookup');
}, 500);
printBenchmark($result, 5.0); // Baseline: <5ms
// Benchmark 4: Get All Models for Name
for ($i = 0; $i < 10; $i++) {
    $metadata = new ModelMetadata(
        modelName: 'perf-test-multi',
        modelType: ModelType::SUPERVISED,
        version: new Version(1, $i, 0),
        configuration: [],
        performanceMetrics: [],
        createdAt: Timestamp::now(),
        deployedAt: null,
        environment: 'development'
    );
    $registry->register($metadata);
}
$result = benchmark('Get All Models (10 versions)', function() use ($registry) {
    $registry->getAll('perf-test-multi');
}, 200);
printBenchmark($result, 15.0); // Baseline: <15ms
echo "\n" . blue("═══ DatabasePerformanceStorage Benchmarks ═══\n\n");
// Benchmark 5: Single Prediction Storage
$result = benchmark('Prediction Storage (single)', function() use ($storage) {
    static $counter = 0;
    $counter++;
    $record = [
        'model_name' => 'perf-test-predictions',
        'version' => '1.0.0',
        'prediction' => ['class' => 'A', 'confidence' => 0.9],
        'actual' => ['class' => 'A'],
        'confidence' => 0.9,
        'features' => ['feature1' => 100, 'feature2' => 200],
        'timestamp' => Timestamp::now(),
        'is_correct' => true,
    ];
    $storage->storePrediction($record);
}, 100);
printBenchmark($result, 15.0); // Baseline: <15ms
// Benchmark 6: Bulk Prediction Storage
$result = benchmark('Prediction Storage (bulk 100)', function() use ($storage) {
    static $batchCounter = 0;
    $batchCounter++;
    for ($i = 0; $i < 100; $i++) {
        $record = [
            'model_name' => "perf-test-bulk-{$batchCounter}",
            'version' => '1.0.0',
            'prediction' => ['class' => 'A'],
            'actual' => ['class' => 'A'],
            'confidence' => 0.85,
            'features' => ['f1' => $i],
            'timestamp' => Timestamp::now(),
            'is_correct' => true,
        ];
        $storage->storePrediction($record);
    }
}, 5);
printBenchmark($result, 500.0); // Baseline: <500ms
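// Note: the "bulk" benchmark above loops over storePrediction(), so it measures
// 100 single-row inserts rather than a true batch write. If the storage layer
// ever grows a batch API (hypothetical storePredictions(array $records) wrapping
// one multi-row INSERT in a transaction), benchmarking it here would separate
// per-statement overhead from row volume.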
// Benchmark 7: Get Recent Predictions
for ($i = 0; $i < 100; $i++) {
    $record = [
        'model_name' => 'perf-test-recent',
        'version' => '1.0.0',
        'prediction' => ['class' => 'A'],
        'actual' => ['class' => 'A'],
        'confidence' => 0.85,
        'features' => [],
        'timestamp' => Timestamp::now(),
        'is_correct' => true,
    ];
    $storage->storePrediction($record);
}
$result = benchmark('Get Recent Predictions (100)', function() use ($storage) {
    $storage->getRecentPredictions('perf-test-recent', new Version(1, 0, 0), 100);
}, 100);
printBenchmark($result, 20.0); // Baseline: <20ms
// Benchmark 8: Calculate Accuracy (1000 records)
for ($i = 0; $i < 1000; $i++) {
    $record = [
        'model_name' => 'perf-test-accuracy',
        'version' => '1.0.0',
        'prediction' => ['class' => 'A'],
        'actual' => ['class' => ($i % 4 === 0) ? 'B' : 'A'], // 75% accuracy
        'confidence' => 0.85,
        'features' => [],
        'timestamp' => Timestamp::now(),
        'is_correct' => ($i % 4 !== 0),
    ];
    $storage->storePrediction($record);
}
$result = benchmark('Calculate Accuracy (1000 records)', function() use ($storage) {
    $storage->calculateAccuracy('perf-test-accuracy', new Version(1, 0, 0), 1000);
}, 50);
printBenchmark($result, 100.0); // Baseline: <100ms
// Benchmark 9: Confidence Baseline Storage
$result = benchmark('Confidence Baseline Storage', function() use ($storage) {
    static $counter = 0;
    $counter++;
    $storage->storeConfidenceBaseline(
        "perf-test-baseline-{$counter}",
        new Version(1, 0, 0),
        0.85,
        0.12
    );
}, 100);
printBenchmark($result, 10.0); // Baseline: <10ms
// Benchmark 10: Confidence Baseline Retrieval
$storage->storeConfidenceBaseline('perf-test-baseline-get', new Version(1, 0, 0), 0.85, 0.12);
$result = benchmark('Confidence Baseline Retrieval', function() use ($storage) {
    $storage->getConfidenceBaseline('perf-test-baseline-get', new Version(1, 0, 0));
}, 500);
printBenchmark($result, 5.0); // Baseline: <5ms
// Summary
echo "\n" . blue("═══ Performance Summary ═══\n\n");
$totalTests = count($benchmarks);
$passedTests = 0;
// Baselines per test (mirrors the inline targets above); defined once, outside the loop
$baselines = [
    'Model Registration (single)' => 10.0,
    'Model Lookup (by name + version)' => 5.0,
    'Model Lookup (latest)' => 5.0,
    'Get All Models (10 versions)' => 15.0,
    'Prediction Storage (single)' => 15.0,
    'Prediction Storage (bulk 100)' => 500.0,
    'Get Recent Predictions (100)' => 20.0,
    'Calculate Accuracy (1000 records)' => 100.0,
    'Confidence Baseline Storage' => 10.0,
    'Confidence Baseline Retrieval' => 5.0,
];
foreach ($benchmarks as $benchmark) {
    $baseline = $baselines[$benchmark['name']] ?? null;
    if ($baseline !== null && $benchmark['avg_time_ms'] <= $baseline) {
        $passedTests++;
    }
}
echo green("Passed: {$passedTests}/{$totalTests}\n");
if ($passedTests < $totalTests) {
    echo red("Failed: " . ($totalTests - $passedTests) . "/{$totalTests}\n");
} else {
    echo green("All performance benchmarks passed! ✓\n");
}
echo "\n" . cyan("Memory Usage: " . round(memory_get_peak_usage(true) / 1024 / 1024, 2) . " MB\n");
// Clean up
echo "\n" . yellow("Cleaning up test data...\n");
$connection->execute(SqlQuery::create('DELETE FROM ml_models WHERE model_name LIKE ?', ['perf-test-%']));
$connection->execute(SqlQuery::create('DELETE FROM ml_predictions WHERE model_name LIKE ?', ['perf-test-%']));
$connection->execute(SqlQuery::create('DELETE FROM ml_confidence_baselines WHERE model_name LIKE ?', ['perf-test-%']));
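// Exit 0 when every baseline passed, 1 otherwise, so callers (e.g. CI) can fail the run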
exit($passedTests === $totalTests ? 0 : 1);