Files
michaelschiemer/tests/run-ml-tests.php
Michael Schiemer 3b623e7afb feat(Deployment): Integrate Ansible deployment via PHP deployment pipeline
- Create AnsibleDeployStage using framework's Process module for secure command execution
- Integrate AnsibleDeployStage into DeploymentPipelineCommands for production deployments
- Add force_deploy flag support in Ansible playbook to override stale locks
- Use PHP deployment module as orchestrator (php console.php deploy:production)
- Fix ErrorAggregationInitializer to use Environment class instead of $_ENV superglobal

Architecture:
- BuildStage → AnsibleDeployStage → HealthCheckStage for production
- Process module provides timeout, error handling, and output capture
- Ansible playbook supports rollback via rollback-git-based.yml
- Zero-downtime deployments with health checks
2025-10-26 14:08:07 +01:00

332 lines
10 KiB
PHP

<?php
declare(strict_types=1);
/**
* Simple Test Runner for ML Management System Integration Tests
*
* This manually runs the integration tests without requiring Pest or PHPUnit
*/
require __DIR__ . '/../vendor/autoload.php';
use App\Framework\Core\ContainerBootstrapper;
use App\Framework\DI\DefaultContainer;
use App\Framework\Performance\EnhancedPerformanceCollector;
use App\Framework\Config\Environment;
use App\Framework\Context\ExecutionContext;
use App\Framework\Database\ValueObjects\SqlQuery;
use App\Framework\Database\ConnectionInterface;
// Bootstrap the DI container by hand (mirrors the AppBootstrapper pattern)
// so this script can run standalone without the full app entry point.
//
// The performance collector is built first because
// ContainerBootstrapper::bootstrap() takes it as an argument below.
$performanceCollector = new EnhancedPerformanceCollector(
new \App\Framework\DateTime\SystemClock(),
new \App\Framework\DateTime\SystemHighResolutionClock(),
new \App\Framework\Performance\MemoryMonitor()
);
// Create the container before bootstrap so Environment and ExecutionContext
// can be registered as pre-existing instances.
$container = new DefaultContainer();
// Load configuration from the project .env and register it, so framework
// services resolve Environment from the container rather than $_ENV.
$env = Environment::fromFile(__DIR__ . '/../.env');
$container->instance(Environment::class, $env);
// Register a test-mode ExecutionContext up front so services that branch on
// context see "test" during bootstrap.
$executionContext = ExecutionContext::forTest();
$container->instance(ExecutionContext::class, $executionContext);
// Now bootstrap. NOTE(review): the app root '/var/www/html' is hard-coded —
// presumably the Docker layout; confirm it matches non-container runs.
$bootstrapper = new ContainerBootstrapper($container);
$container = $bootstrapper->bootstrap('/var/www/html', $performanceCollector);
// Provide a global container() accessor if the framework helper is not
// already loaded; it returns the container bootstrapped above.
if (!function_exists('container')) {
    /**
     * Global accessor for the bootstrapped DI container.
     */
    function container()
    {
        global $container;

        return $container;
    }
}
// Color output helpers
/** Wrap text in ANSI green escape codes for terminal output. */
function green(string $text): string
{
    return sprintf("\033[32m%s\033[0m", $text);
}
/** Wrap text in ANSI red escape codes for terminal output. */
function red(string $text): string
{
    return sprintf("\033[31m%s\033[0m", $text);
}
/** Wrap text in ANSI yellow escape codes for terminal output. */
function yellow(string $text): string
{
    return sprintf("\033[33m%s\033[0m", $text);
}
/** Wrap text in ANSI blue escape codes for terminal output. */
function blue(string $text): string
{
    return sprintf("\033[34m%s\033[0m", $text);
}
// Test-runner state: pass/fail counters and collected error messages.
$passed = 0;
$failed = 0;
$errors = [];

echo blue("=== ML Management System Integration Tests ===\n\n");

// Resolve the services under test from the bootstrapped container.
$connection = $container->get(\App\Framework\Database\ConnectionInterface::class);
$registry = $container->get(\App\Framework\MachineLearning\ModelManagement\DatabaseModelRegistry::class);
$storage = $container->get(\App\Framework\MachineLearning\ModelManagement\DatabasePerformanceStorage::class);

// Remove leftovers from earlier runs; every fixture uses the 'test-' prefix.
echo yellow("Cleaning up test data...\n");
foreach (['ml_models', 'ml_predictions', 'ml_confidence_baselines'] as $table) {
    $connection->execute(
        SqlQuery::create(
            "DELETE FROM {$table} WHERE model_name LIKE ?",
            ['test-%']
        )
    );
}
// Test 1: registering model metadata persists it and it can be read back.
echo "\nTest 1: Can register a new model in database... ";
try {
    $modelMetadata = new \App\Framework\MachineLearning\ModelManagement\ValueObjects\ModelMetadata(
        modelName: 'test-sentiment-analyzer',
        modelType: \App\Framework\MachineLearning\ModelManagement\ValueObjects\ModelType::SUPERVISED,
        version: new \App\Framework\Core\ValueObjects\Version(1, 0, 0),
        configuration: ['hidden_layers' => 3, 'learning_rate' => 0.001],
        performanceMetrics: ['accuracy' => 0.95, 'precision' => 0.93],
        createdAt: \App\Framework\Core\ValueObjects\Timestamp::now(),
        deployedAt: \App\Framework\Core\ValueObjects\Timestamp::now(),
        environment: 'production',
        metadata: ['description' => 'Test sentiment analysis model']
    );
    $registry->register($modelMetadata);

    // Round trip: fetch by name + version and confirm the name matches.
    $fetched = $registry->get('test-sentiment-analyzer', new \App\Framework\Core\ValueObjects\Version(1, 0, 0));
    $roundTripOk = $fetched !== null && $fetched->modelName === 'test-sentiment-analyzer';
    if ($roundTripOk) {
        echo green("✓ PASSED\n");
        $passed++;
    } else {
        echo red("✗ FAILED\n");
        $failed++;
        $errors[] = "Model was not retrieved correctly";
    }
} catch (\Throwable $e) {
    echo red("✗ ERROR: " . $e->getMessage() . "\n");
    $failed++;
    $errors[] = $e->getMessage();
}
// Test 2: a prediction record round-trips through DatabasePerformanceStorage.
echo "Test 2: Can store prediction records... ";
try {
    $predictionRecord = [
        'model_name' => 'test-predictor',
        'version' => '1.0.0',
        'prediction' => ['class' => 'positive', 'probability' => 0.85],
        'actual' => ['class' => 'positive'],
        'confidence' => 0.85,
        'features' => ['text_length' => 150, 'sentiment_score' => 0.7],
        'timestamp' => \App\Framework\Core\ValueObjects\Timestamp::now(),
        'is_correct' => true,
    ];
    $storage->storePrediction($predictionRecord);

    // Verify: exactly one stored row for this model/version.
    $recentPredictions = $storage->getRecentPredictions(
        'test-predictor',
        new \App\Framework\Core\ValueObjects\Version(1, 0, 0),
        100
    );
    // FIX: compare confidence with a tolerance instead of loose `==`.
    // The driver may hand the column back as a string or float, and exact
    // float equality is fragile; this matches the abs() checks in Tests 3/4.
    $storedCorrectly = count($recentPredictions) === 1
        && abs((float) $recentPredictions[0]['confidence'] - 0.85) < 0.01;
    if ($storedCorrectly) {
        echo green("✓ PASSED\n");
        $passed++;
    } else {
        echo red("✗ FAILED\n");
        $failed++;
        $errors[] = "Prediction was not stored correctly";
    }
} catch (\Throwable $e) {
    echo red("✗ ERROR: " . $e->getMessage() . "\n");
    $failed++;
    $errors[] = $e->getMessage();
}
// Test 3: accuracy over stored predictions (3 correct of 4 => 0.75).
echo "Test 3: Can calculate accuracy from predictions... ";
try {
    $modelName = 'test-accuracy-model';
    $version = new \App\Framework\Core\ValueObjects\Version(1, 0, 0);

    // Seed four predictions, three of which are marked correct.
    $cases = [
        ['prediction' => 'A', 'actual' => 'A', 'correct' => true, 'confidence' => 0.9],
        ['prediction' => 'B', 'actual' => 'B', 'correct' => true, 'confidence' => 0.85],
        ['prediction' => 'A', 'actual' => 'B', 'correct' => false, 'confidence' => 0.6],
        ['prediction' => 'C', 'actual' => 'C', 'correct' => true, 'confidence' => 0.95],
    ];
    foreach ($cases as $case) {
        $storage->storePrediction([
            'model_name' => $modelName,
            'version' => $version->toString(),
            'prediction' => ['class' => $case['prediction']],
            'actual' => ['class' => $case['actual']],
            'confidence' => $case['confidence'],
            'features' => [],
            'timestamp' => \App\Framework\Core\ValueObjects\Timestamp::now(),
            'is_correct' => $case['correct'],
        ]);
    }

    // Expect 3/4 = 0.75 within a small tolerance (float-safe comparison).
    $accuracy = $storage->calculateAccuracy($modelName, $version, 100);
    if (abs($accuracy - 0.75) < 0.01) {
        echo green("✓ PASSED\n");
        $passed++;
    } else {
        echo red("✗ FAILED (expected 0.75, got {$accuracy})\n");
        $failed++;
        $errors[] = "Accuracy calculation incorrect: expected 0.75, got {$accuracy}";
    }
} catch (\Throwable $e) {
    echo red("✗ ERROR: " . $e->getMessage() . "\n");
    $failed++;
    $errors[] = $e->getMessage();
}
// Test 4: confidence baseline write + read round trip.
echo "Test 4: Can store and retrieve confidence baseline... ";
try {
    $modelName = 'test-baseline-model';
    $version = new \App\Framework\Core\ValueObjects\Version(1, 2, 3);

    $storage->storeConfidenceBaseline(
        $modelName,
        $version,
        avgConfidence: 0.82,
        stdDevConfidence: 0.12
    );

    // Fetch it back and check avg_confidence within tolerance.
    $stored = $storage->getConfidenceBaseline($modelName, $version);
    $matches = $stored !== null && abs($stored['avg_confidence'] - 0.82) < 0.01;
    if ($matches) {
        echo green("✓ PASSED\n");
        $passed++;
    } else {
        echo red("✗ FAILED\n");
        $failed++;
        $errors[] = "Confidence baseline not stored/retrieved correctly";
    }
} catch (\Throwable $e) {
    echo red("✗ ERROR: " . $e->getMessage() . "\n");
    $failed++;
    $errors[] = $e->getMessage();
}
// Test 5: production MLConfig flags drift only above its threshold.
echo "Test 5: MLConfig can detect drift... ";
try {
    $config = \App\Framework\MachineLearning\ModelManagement\MLConfig::production();

    $belowThreshold = $config->isDriftDetected(0.10); // under the 0.15 threshold
    $aboveThreshold = $config->isDriftDetected(0.20); // over the 0.15 threshold
    if ($belowThreshold === false && $aboveThreshold === true) {
        echo green("✓ PASSED\n");
        $passed++;
    } else {
        echo red("✗ FAILED\n");
        $failed++;
        $errors[] = "Drift detection logic incorrect";
    }
} catch (\Throwable $e) {
    echo red("✗ ERROR: " . $e->getMessage() . "\n");
    $failed++;
    $errors[] = $e->getMessage();
}
// Test 6: NotificationAlertingService dispatches without throwing.
echo "Test 6: Can send alerts via NotificationAlertingService... ";
try {
    // Null dispatcher is a no-op sink, so this exercises only the wiring.
    $alerting = new \App\Framework\MachineLearning\ModelManagement\NotificationAlertingService(
        new \App\Framework\Notification\NullNotificationDispatcher(),
        \App\Framework\MachineLearning\ModelManagement\MLConfig::development(),
        'test-admin'
    );

    // Pass condition is simply reaching the lines below without an exception.
    $alerting->sendAlert(
        'warning',
        'Test Alert',
        'This is a test alert message',
        ['test_data' => 'value']
    );

    echo green("✓ PASSED\n");
    $passed++;
} catch (\Throwable $e) {
    echo red("✗ ERROR: " . $e->getMessage() . "\n");
    $failed++;
    $errors[] = $e->getMessage();
}
// Teardown: remove every 'test-' prefixed fixture created above.
echo yellow("\nCleaning up test data...\n");
foreach (['ml_models', 'ml_predictions', 'ml_confidence_baselines'] as $table) {
    $connection->execute(
        SqlQuery::create(
            "DELETE FROM {$table} WHERE model_name LIKE ?",
            ['test-%']
        )
    );
}
// Final report: totals plus collected errors; exit code mirrors failure state.
echo "\n" . blue("=== Test Summary ===\n");
echo green("Passed: {$passed}\n");
if ($failed > 0) {
    echo red("Failed: {$failed}\n");
} else {
    echo "Failed: 0\n";
}
echo "Total: " . ($passed + $failed) . "\n";

if ($failed > 0) {
    echo "\n" . red("=== Errors ===\n");
    foreach ($errors as $i => $error) {
        echo red(($i + 1) . ". {$error}\n");
    }
}

// Non-zero exit so CI treats any failure as a failed job.
exit($failed > 0 ? 1 : 0);