Add full Oculog codebase
This commit is contained in:
7
server/backend/.dockerignore
Normal file
7
server/backend/.dockerignore
Normal file
@@ -0,0 +1,7 @@
|
||||
node_modules
|
||||
npm-debug.log
|
||||
.env
|
||||
.git
|
||||
.gitignore
|
||||
README.md
|
||||
|
||||
19
server/backend/Dockerfile
Normal file
19
server/backend/Dockerfile
Normal file
@@ -0,0 +1,19 @@
|
||||
FROM node:18-alpine

WORKDIR /app

# Copy manifests first so the dependency layer is cached until they change
COPY package*.json ./

# Reproducible install from package-lock.json; --omit=dev skips dev-only
# packages (nodemon) that are not needed at runtime.
# (Previously `npm install`, which ignores lockfile pinning and bloats the image.)
RUN npm ci --omit=dev

# Copy application code
COPY . .

# Expose API port (matches the backend's listen port)
EXPOSE 3001

# Start the application
CMD ["npm", "start"]
|
||||
|
||||
125
server/backend/db/init.sql
Normal file
125
server/backend/db/init.sql
Normal file
@@ -0,0 +1,125 @@
|
||||
-- =====================================================================
-- Oculog initial schema.
-- Creates the metrics/servers/api_keys tables, supporting indexes, and
-- a trigger that keeps per-server bookkeeping up to date. The embedded
-- DO blocks are in-place migrations so the script is safe to re-run
-- against databases created by earlier versions of this file.
-- =====================================================================

-- Create metrics table: one row per metrics sample pushed by a client.
CREATE TABLE IF NOT EXISTS metrics (
    id SERIAL PRIMARY KEY,
    server_id VARCHAR(255) NOT NULL,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    cpu_usage DECIMAL(5,2),
    cpu_cores INTEGER,
    memory_total DECIMAL(10,2),
    memory_used DECIMAL(10,2),
    memory_available DECIMAL(10,2),
    memory_percent DECIMAL(5,2),
    disk_total DECIMAL(10,2),
    disk_used DECIMAL(10,2),
    disk_free DECIMAL(10,2),
    disk_percent DECIMAL(5,2),
    disks JSONB,
    network_rx DECIMAL(10,2),
    network_tx DECIMAL(10,2),
    network_rx_total DECIMAL(10,2),
    network_tx_total DECIMAL(10,2),
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);

-- Create indexes for better query performance
-- (server_id + timestamp DESC matches the "latest metrics per server" query pattern)
CREATE INDEX IF NOT EXISTS idx_metrics_server_id ON metrics(server_id);
CREATE INDEX IF NOT EXISTS idx_metrics_timestamp ON metrics(timestamp);
CREATE INDEX IF NOT EXISTS idx_metrics_server_timestamp ON metrics(server_id, timestamp DESC);

-- Migration: Add disks column if it doesn't exist (for existing databases)
-- NOTE: fresh databases already get `disks` from the CREATE TABLE above, so
-- this branch only fires on pre-existing schemas.
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'metrics' AND column_name = 'disks'
    ) THEN
        ALTER TABLE metrics ADD COLUMN disks JSONB;
        CREATE INDEX IF NOT EXISTS idx_metrics_disks ON metrics USING GIN (disks);
    END IF;
END $$;

-- Create index for disks column (will be created if column exists)
-- Kept outside the DO block so fresh installs (where the branch above does
-- not run) still get the index; IF NOT EXISTS makes it a no-op otherwise.
CREATE INDEX IF NOT EXISTS idx_metrics_disks ON metrics USING GIN (disks);

-- Migration: Add load average columns if they don't exist
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'metrics' AND column_name = 'load_avg_1min'
    ) THEN
        ALTER TABLE metrics ADD COLUMN load_avg_1min DECIMAL(5,2);
        ALTER TABLE metrics ADD COLUMN load_avg_5min DECIMAL(5,2);
        ALTER TABLE metrics ADD COLUMN load_avg_15min DECIMAL(5,2);
    END IF;
END $$;

-- Migration: Add swap columns if they don't exist
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'metrics' AND column_name = 'swap_total'
    ) THEN
        ALTER TABLE metrics ADD COLUMN swap_total DECIMAL(10,2);
        ALTER TABLE metrics ADD COLUMN swap_used DECIMAL(10,2);
        ALTER TABLE metrics ADD COLUMN swap_free DECIMAL(10,2);
        ALTER TABLE metrics ADD COLUMN swap_percent DECIMAL(5,2);
    END IF;
END $$;

-- Migration: Add process_count and uptime columns if they don't exist
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'metrics' AND column_name = 'process_count'
    ) THEN
        ALTER TABLE metrics ADD COLUMN process_count INTEGER;
        ALTER TABLE metrics ADD COLUMN uptime_seconds DECIMAL(10,2);
    END IF;
END $$;

-- Create servers table to track server metadata
-- (maintained automatically by the trigger below; one row per distinct server_id)
CREATE TABLE IF NOT EXISTS servers (
    server_id VARCHAR(255) PRIMARY KEY,
    hostname VARCHAR(255),
    first_seen TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    last_seen TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    total_metrics_count INTEGER DEFAULT 0
);

-- Create function to update server last_seen and metrics count.
-- Upserts the server row on every metrics insert; note hostname is seeded
-- with the server_id (no separate hostname is available at insert time).
CREATE OR REPLACE FUNCTION update_server_stats()
RETURNS TRIGGER AS $$
BEGIN
    INSERT INTO servers (server_id, hostname, first_seen, last_seen, total_metrics_count)
    VALUES (NEW.server_id, NEW.server_id, NOW(), NOW(), 1)
    ON CONFLICT (server_id) DO UPDATE
    SET last_seen = NOW(),
        total_metrics_count = servers.total_metrics_count + 1;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Create trigger to automatically update server stats
-- (DROP first so re-running this script does not fail on a duplicate trigger)
DROP TRIGGER IF EXISTS trigger_update_server_stats ON metrics;
CREATE TRIGGER trigger_update_server_stats
AFTER INSERT ON metrics
FOR EACH ROW
EXECUTE FUNCTION update_server_stats();

-- Create API keys table for client authentication.
-- Only a hash of the key is stored; the plaintext key never touches the DB.
CREATE TABLE IF NOT EXISTS api_keys (
    id SERIAL PRIMARY KEY,
    key_hash VARCHAR(255) NOT NULL UNIQUE,
    server_id VARCHAR(255),
    name VARCHAR(255),
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    last_used TIMESTAMP WITH TIME ZONE,
    is_active BOOLEAN DEFAULT TRUE
);

CREATE INDEX IF NOT EXISTS idx_api_keys_hash ON api_keys(key_hash);
CREATE INDEX IF NOT EXISTS idx_api_keys_server_id ON api_keys(server_id);
|
||||
|
||||
8
server/backend/db/migration_add_disks.sql
Normal file
8
server/backend/db/migration_add_disks.sql
Normal file
@@ -0,0 +1,8 @@
|
||||
-- Migration: Add disks column to metrics table
-- This allows storing multiple disk metrics as JSON

-- Idempotent: IF NOT EXISTS makes re-running this file a no-op.
ALTER TABLE metrics ADD COLUMN IF NOT EXISTS disks JSONB;

-- Create index on disks column for better query performance
-- (GIN supports containment/path queries on the JSONB disk entries)
CREATE INDEX IF NOT EXISTS idx_metrics_disks ON metrics USING GIN (disks);
|
||||
|
||||
17
server/backend/db/migration_add_server_info.sql
Normal file
17
server/backend/db/migration_add_server_info.sql
Normal file
@@ -0,0 +1,17 @@
|
||||
-- Migration: Add server_info column to servers table
-- This column stores server metadata: OS info, live status, top processes, containers, IP info

-- Guarded by an information_schema check so the migration is idempotent;
-- the NOTICEs make re-runs visible in the server log.
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'servers' AND column_name = 'server_info'
    ) THEN
        ALTER TABLE servers ADD COLUMN server_info JSONB;
        -- GIN index enables efficient containment queries on the JSONB payload
        CREATE INDEX IF NOT EXISTS idx_servers_server_info ON servers USING GIN (server_info);
        RAISE NOTICE 'server_info column added to servers table';
    ELSE
        RAISE NOTICE 'server_info column already exists';
    END IF;
END $$;
|
||||
|
||||
44
server/backend/db/migration_add_synthetic_monitors.sql
Normal file
44
server/backend/db/migration_add_synthetic_monitors.sql
Normal file
@@ -0,0 +1,44 @@
|
||||
-- Migration: synthetic monitoring schema.
-- A monitor describes a periodic check (HTTP status, ping, or port probe);
-- monitor_results stores each execution's outcome.

-- Create synthetic_monitors table
CREATE TABLE IF NOT EXISTS synthetic_monitors (
    id SERIAL PRIMARY KEY,
    name VARCHAR(255) NOT NULL,
    type VARCHAR(50) NOT NULL CHECK (type IN ('http_status', 'ping', 'port_check')),
    target VARCHAR(500) NOT NULL,
    -- expected_status applies to http_status checks; port to port_check
    expected_status INTEGER,
    port INTEGER,
    -- check cadence in seconds
    interval INTEGER NOT NULL DEFAULT 60,
    enabled BOOLEAN DEFAULT TRUE,
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);

-- Create monitor_results table
-- (CASCADE delete: removing a monitor removes its history)
CREATE TABLE IF NOT EXISTS monitor_results (
    id SERIAL PRIMARY KEY,
    monitor_id INTEGER NOT NULL REFERENCES synthetic_monitors(id) ON DELETE CASCADE,
    status VARCHAR(20) NOT NULL CHECK (status IN ('success', 'failed')),
    response_time INTEGER,
    message TEXT,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);

-- Create indexes for better query performance
-- (partial index on enabled = TRUE matches the scheduler's "active monitors" query)
CREATE INDEX IF NOT EXISTS idx_monitor_results_monitor_id ON monitor_results(monitor_id);
CREATE INDEX IF NOT EXISTS idx_monitor_results_timestamp ON monitor_results(timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_synthetic_monitors_enabled ON synthetic_monitors(enabled) WHERE enabled = TRUE;

-- Create function to update updated_at timestamp on every row update
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Create trigger to automatically update updated_at
-- (DROP first so re-running this migration does not fail on a duplicate trigger)
DROP TRIGGER IF EXISTS trigger_update_synthetic_monitors_updated_at ON synthetic_monitors;
CREATE TRIGGER trigger_update_synthetic_monitors_updated_at
BEFORE UPDATE ON synthetic_monitors
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
|
||||
1500
server/backend/package-lock.json
generated
Normal file
1500
server/backend/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
26
server/backend/package.json
Normal file
26
server/backend/package.json
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"name": "oculog-backend",
|
||||
"version": "1.0.0",
|
||||
"description": "Backend API for Oculog metrics observability platform",
|
||||
"main": "src/index.js",
|
||||
"scripts": {
|
||||
"start": "node src/index.js",
|
||||
"dev": "nodemon src/index.js",
|
||||
"migrate": "node src/migrate-cli.js"
|
||||
},
|
||||
"keywords": ["metrics", "observability", "monitoring"],
|
||||
"author": "",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"express": "^4.18.2",
|
||||
"cors": "^2.8.5",
|
||||
"dotenv": "^16.3.1",
|
||||
"body-parser": "^1.20.2",
|
||||
"pg": "^8.11.3",
|
||||
"axios": "^1.6.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"nodemon": "^3.0.1"
|
||||
}
|
||||
}
|
||||
|
||||
271
server/backend/src/alertEvaluator.js
Normal file
271
server/backend/src/alertEvaluator.js
Normal file
@@ -0,0 +1,271 @@
|
||||
/**
|
||||
* Alert Evaluator
|
||||
* Evaluates alert policies against current server metrics and synthetic monitor results
|
||||
*/
|
||||
|
||||
const db = require('./db');
|
||||
const fs = require('fs').promises;
|
||||
const path = require('path');
|
||||
|
||||
/**
|
||||
* Evaluate server metric alerts
|
||||
*/
|
||||
/**
 * Minutes elapsed since a server's last_seen timestamp.
 *
 * @param {Object} server - Server row with a `last_seen` timestamp.
 * @returns {number} Fractional minutes since last_seen.
 */
function minutesSinceLastSeen(server) {
  return (Date.now() - new Date(server.last_seen).getTime()) / (1000 * 60);
}

/**
 * Best-effort lookup of the latest client version, derived from the mtime of
 * clients/ubuntu/client.py (same logic as /api/servers/:serverId/info).
 * Tries several deployment layouts; returns null if none exists.
 *
 * @returns {Promise<string|null>} Version string ("YYYY-MM-DD-HH-mm") or null.
 */
async function getLatestClientVersion() {
  const possiblePaths = [
    path.join(__dirname, '../../../../clients/ubuntu/client.py'),
    path.join(__dirname, '../../../clients/ubuntu/client.py'),
    path.join(process.cwd(), 'clients/ubuntu/client.py'),
    '/app/clients/ubuntu/client.py'
  ];

  for (const p of possiblePaths) {
    try {
      const stats = await fs.stat(p);
      const mtime = new Date(stats.mtime);
      return mtime.toISOString().slice(0, 16).replace('T', '-').replace(':', '-');
    } catch (e) {
      // Path not present in this deployment layout; try the next candidate.
    }
  }
  return null;
}

/**
 * Evaluate all enabled 'server_metric' alert policies against the latest
 * metrics for each server, triggering or resolving alerts as needed.
 *
 * A policy with a server_id targets that server only; otherwise it applies to
 * every known server. Errors are logged and never propagated.
 *
 * @returns {Promise<void>}
 */
async function evaluateServerMetricAlerts() {
  try {
    const policies = await db.getEnabledAlertPolicies();
    const serverMetricPolicies = policies.filter(p => p.type === 'server_metric');

    if (serverMetricPolicies.length === 0) {
      return;
    }

    // Get latest metrics for all servers
    const latestMetrics = await db.getLatestMetrics();
    const servers = await db.getServers();

    for (const policy of serverMetricPolicies) {
      const serversToCheck = policy.server_id
        ? servers.filter(s => s.server_id === policy.server_id)
        : servers;

      for (const server of serversToCheck) {
        const metrics = latestMetrics[server.server_id];
        if (!metrics) {
          // No metrics at all — only the not-reporting policy is meaningful here.
          if (policy.metric_type === 'server_not_reporting') {
            const minutes = minutesSinceLastSeen(server);
            if (minutes >= policy.threshold) {
              await triggerAlert(policy, server.server_id,
                `Server ${server.server_id} has not reported metrics for ${Math.round(minutes)} minutes`);
            } else {
              await resolveAlertIfExists(policy, server.server_id);
            }
          }
          continue;
        }

        let shouldAlert = false;
        let message = '';

        // Each case body is wrapped in a block so its `const` declarations are
        // scoped to that case (the original declared them in the shared switch
        // scope, the no-case-declarations pitfall).
        // NOTE(review): assumes metrics.cpu.usage / memory.percent /
        // disk.percent are numbers; pg DECIMAL columns can come back as
        // strings, which would make .toFixed throw — confirm against
        // db.getLatestMetrics().
        switch (policy.metric_type) {
          case 'cpu_high': {
            const cpuUsage = metrics.cpu?.usage || 0;
            shouldAlert = cpuUsage >= policy.threshold;
            if (shouldAlert) {
              message = `CPU usage is ${cpuUsage.toFixed(1)}% (threshold: ${policy.threshold}%)`;
            }
            break;
          }

          case 'ram_high': {
            const memPercent = metrics.memory?.percent || 0;
            shouldAlert = memPercent >= policy.threshold;
            if (shouldAlert) {
              message = `RAM usage is ${memPercent.toFixed(1)}% (threshold: ${policy.threshold}%)`;
            }
            break;
          }

          case 'disk_used': {
            const diskPercent = metrics.disk?.percent || 0;
            shouldAlert = diskPercent >= policy.threshold;
            if (shouldAlert) {
              message = `Disk usage is ${diskPercent.toFixed(1)}% (threshold: ${policy.threshold}%)`;
            }
            break;
          }

          case 'server_not_reporting': {
            const minutes = minutesSinceLastSeen(server);
            shouldAlert = minutes >= policy.threshold;
            if (shouldAlert) {
              message = `Server has not reported metrics for ${Math.round(minutes)} minutes`;
            }
            break;
          }

          case 'client_out_of_date': {
            const clientVersion = await db.getClientVersion(server.server_id);
            if (clientVersion) {
              try {
                const latestVersion = await getLatestClientVersion();
                // Lexicographic compare works because versions are
                // zero-padded "YYYY-MM-DD-HH-mm" timestamps.
                if (latestVersion && clientVersion < latestVersion) {
                  shouldAlert = true;
                  message = `Client version ${clientVersion} is out of date (latest: ${latestVersion})`;
                }
              } catch (e) {
                // If we can't check, don't alert
                shouldAlert = false;
              }
            }
            break;
          }
        }

        if (shouldAlert) {
          await triggerAlert(policy, server.server_id, message);
        } else {
          await resolveAlertIfExists(policy, server.server_id);
        }
      }
    }
  } catch (error) {
    console.error('[Alert Evaluator] Error evaluating server metric alerts:', {
      message: error.message,
      stack: error.stack
    });
  }
}
|
||||
|
||||
/**
|
||||
* Evaluate synthetic monitor alerts
|
||||
*/
|
||||
/**
 * Evaluate all enabled 'synthetic_monitor' alert policies against the most
 * recent result of each monitor: trigger an alert on failure, resolve it once
 * the monitor reports success again. Errors are logged and never propagated.
 *
 * @returns {Promise<void>}
 */
async function evaluateSyntheticMonitorAlerts() {
  try {
    const allPolicies = await db.getEnabledAlertPolicies();
    const monitorPolicies = allPolicies.filter(p => p.type === 'synthetic_monitor');

    if (monitorPolicies.length === 0) {
      return;
    }

    // All monitors, each carrying its last result (if any)
    const monitors = await db.getSyntheticMonitors();

    for (const policy of monitorPolicies) {
      // A policy may target one specific monitor or all of them.
      const candidates = policy.monitor_id
        ? monitors.filter(m => m.id === policy.monitor_id)
        : monitors;

      for (const monitor of candidates) {
        const lastResult = monitor.last_result;
        if (!lastResult) {
          continue; // No results yet
        }

        const alertTarget = `Monitor ${monitor.id}`;

        if (lastResult.status === 'failed') {
          const detail = lastResult.message || 'Check failed';
          await triggerAlert(policy, alertTarget,
            `Synthetic monitor "${monitor.name}" failed: ${detail}`);
        } else {
          // Monitor is healthy again — clear any active alert for it.
          await resolveAlertIfExists(policy, alertTarget);
        }
      }
    }
  } catch (error) {
    console.error('[Alert Evaluator] Error evaluating synthetic monitor alerts:', {
      message: error.message,
      stack: error.stack
    });
  }
}
|
||||
|
||||
/**
|
||||
* Trigger an alert if one doesn't already exist
|
||||
*/
|
||||
/**
 * Create a new active alert for (policy, target), unless one is already open.
 * Deduplication prevents the evaluator from re-raising the same alert on every
 * pass. Errors are logged with full context and never propagated.
 *
 * @param {Object} policy - Alert policy ({ id, name, ... }).
 * @param {string} target - Alert target (server id or "Monitor <id>").
 * @param {string} message - Human-readable alert description.
 * @returns {Promise<void>}
 */
async function triggerAlert(policy, target, message) {
  try {
    // Skip creation when an active alert for this policy/target already exists.
    const alreadyActive = await db.getActiveAlert(policy.id, target);
    if (alreadyActive) {
      return;
    }

    await db.createAlert({
      policy_id: policy.id,
      policy_name: policy.name,
      target,
      message,
      status: 'active'
    });
  } catch (error) {
    console.error('[Alert Evaluator] Error triggering alert:', {
      message: error.message,
      stack: error.stack,
      policyId: policy.id,
      policyName: policy.name,
      target,
      alertMessage: message
    });
  }
}
|
||||
|
||||
/**
|
||||
* Resolve alert if it exists
|
||||
*/
|
||||
/**
 * Resolve the active alert for (policy, target) if one exists.
 * Errors are logged with full context and never propagated.
 *
 * @param {Object} policy - Alert policy ({ id, name, ... }).
 * @param {string} target - Alert target (server id or "Monitor <id>").
 * @returns {Promise<void>}
 */
async function resolveAlertIfExists(policy, target) {
  // Hoisted out of the try so the catch block can log the alert id.
  // (Previously declared inside the try, so `existingAlert?.id` in the catch
  // threw a ReferenceError and masked the original error.)
  let existingAlert;
  try {
    existingAlert = await db.getActiveAlert(policy.id, target);

    if (existingAlert) {
      await db.resolveAlert({
        alert_id: existingAlert.id
      });
    }
  } catch (error) {
    console.error('[Alert Evaluator] Error resolving alert:', {
      message: error.message,
      stack: error.stack,
      policyId: policy.id,
      policyName: policy.name,
      target,
      alertId: existingAlert?.id
    });
  }
}
|
||||
|
||||
/**
|
||||
* Run alert evaluation
|
||||
*/
|
||||
/**
 * Run one full alert evaluation pass: server metric policies first, then
 * synthetic monitor policies, sequentially. Errors are logged and never
 * propagated (each pass also logs its own failures internally).
 *
 * @returns {Promise<void>}
 */
async function evaluateAlerts() {
  const passes = [evaluateServerMetricAlerts, evaluateSyntheticMonitorAlerts];
  try {
    for (const pass of passes) {
      await pass();
    }
  } catch (error) {
    console.error('[Alert Evaluator] Error in alert evaluation:', {
      message: error.message,
      stack: error.stack
    });
  }
}
|
||||
|
||||
/**
|
||||
* Start the alert evaluator
|
||||
*/
|
||||
function startAlertEvaluator() {
|
||||
// Run evaluation immediately on startup
|
||||
evaluateAlerts();
|
||||
|
||||
// Then run evaluation every 60 seconds
|
||||
setInterval(evaluateAlerts, 60000);
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
evaluateAlerts,
|
||||
evaluateServerMetricAlerts,
|
||||
evaluateSyntheticMonitorAlerts,
|
||||
startAlertEvaluator
|
||||
};
|
||||
2016
server/backend/src/db.js
Normal file
2016
server/backend/src/db.js
Normal file
File diff suppressed because it is too large
Load Diff
1067
server/backend/src/index.js
Normal file
1067
server/backend/src/index.js
Normal file
File diff suppressed because it is too large
Load Diff
27
server/backend/src/middleware/auth.js
Normal file
27
server/backend/src/middleware/auth.js
Normal file
@@ -0,0 +1,27 @@
|
||||
const db = require('../db');
|
||||
|
||||
/**
|
||||
* Middleware to authenticate API key
|
||||
*/
|
||||
async function authenticateApiKey(req, res, next) {
|
||||
const apiKey = req.headers['x-api-key'];
|
||||
|
||||
if (!apiKey) {
|
||||
return res.status(401).json({ error: 'API key required' });
|
||||
}
|
||||
|
||||
const keyInfo = await db.validateApiKey(apiKey);
|
||||
|
||||
if (!keyInfo) {
|
||||
return res.status(403).json({ error: 'Invalid or inactive API key' });
|
||||
}
|
||||
|
||||
// Attach key info to request for use in routes
|
||||
req.apiKeyInfo = keyInfo;
|
||||
next();
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
authenticateApiKey
|
||||
};
|
||||
|
||||
28
server/backend/src/migrate-cli.js
Normal file
28
server/backend/src/migrate-cli.js
Normal file
@@ -0,0 +1,28 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Standalone migration runner
|
||||
* Run this script manually to execute database migrations
|
||||
*
|
||||
* Usage:
|
||||
* node src/migrate-cli.js
|
||||
* npm run migrate
|
||||
*/
|
||||
|
||||
require('dotenv').config();
|
||||
const { runMigrations } = require('./migrate');
|
||||
|
||||
/**
 * CLI entry point: run all pending database migrations, then exit 0 on
 * success or 1 on failure.
 */
async function main() {
  console.log('Running database migrations manually...\n');

  try {
    await runMigrations();
  } catch (error) {
    console.error('\n✗ Migration failed:', error);
    process.exit(1);
  }

  console.log('\n✓ Migrations completed successfully');
  process.exit(0);
}

main();
|
||||
|
||||
419
server/backend/src/migrate.js
Normal file
419
server/backend/src/migrate.js
Normal file
@@ -0,0 +1,419 @@
|
||||
/**
|
||||
* Database migration runner
|
||||
* Runs migrations on startup to ensure database schema is up to date
|
||||
*/
|
||||
|
||||
const db = require('./db');
|
||||
|
||||
async function runMigrations() {
|
||||
try {
|
||||
// Check if disks column exists
|
||||
const checkColumnQuery = `
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'metrics' AND column_name = 'disks'
|
||||
`;
|
||||
|
||||
const result = await db.pool.query(checkColumnQuery);
|
||||
|
||||
if (result.rows.length === 0) {
|
||||
try {
|
||||
// Add the disks column
|
||||
await db.pool.query(`
|
||||
ALTER TABLE metrics ADD COLUMN disks JSONB
|
||||
`);
|
||||
} catch (error) {
|
||||
// Column might have been added between check and alter
|
||||
if (error.code !== '42701') { // duplicate column error
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Create index if it doesn't exist
|
||||
try {
|
||||
await db.pool.query(`
|
||||
CREATE INDEX IF NOT EXISTS idx_metrics_disks ON metrics USING GIN (disks)
|
||||
`);
|
||||
} catch (error) {
|
||||
console.error('[Migration] Warning: Could not create disks index:', {
|
||||
message: error.message,
|
||||
code: error.code
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Check if server_info column exists
|
||||
const checkServerInfoQuery = `
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'servers' AND column_name = 'server_info'
|
||||
`;
|
||||
|
||||
const serverInfoResult = await db.pool.query(checkServerInfoQuery);
|
||||
|
||||
if (serverInfoResult.rows.length === 0) {
|
||||
try {
|
||||
// Add the server_info column
|
||||
await db.pool.query(`
|
||||
ALTER TABLE servers ADD COLUMN server_info JSONB
|
||||
`);
|
||||
} catch (error) {
|
||||
// Column might have been added between check and alter
|
||||
if (error.code !== '42701') { // duplicate column error
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Create index if it doesn't exist
|
||||
try {
|
||||
await db.pool.query(`
|
||||
CREATE INDEX IF NOT EXISTS idx_servers_server_info ON servers USING GIN (server_info)
|
||||
`);
|
||||
} catch (error) {
|
||||
console.error('[Migration] Warning: Could not create server_info index:', {
|
||||
message: error.message,
|
||||
code: error.code
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Check if client_version column exists
|
||||
const checkClientVersionQuery = `
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'servers' AND column_name = 'client_version'
|
||||
`;
|
||||
|
||||
const clientVersionResult = await db.pool.query(checkClientVersionQuery);
|
||||
|
||||
if (clientVersionResult.rows.length === 0) {
|
||||
try {
|
||||
// Add the client_version column
|
||||
await db.pool.query(`
|
||||
ALTER TABLE servers ADD COLUMN client_version VARCHAR(50)
|
||||
`);
|
||||
} catch (error) {
|
||||
// Column might have been added between check and alter
|
||||
if (error.code !== '42701') { // duplicate column error
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check and add swap columns if they don't exist
|
||||
const checkSwapQuery = `
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'metrics' AND column_name = 'swap_total'
|
||||
`;
|
||||
|
||||
const swapResult = await db.pool.query(checkSwapQuery);
|
||||
|
||||
if (swapResult.rows.length === 0) {
|
||||
try {
|
||||
await db.pool.query(`
|
||||
ALTER TABLE metrics ADD COLUMN swap_total DECIMAL(10,2);
|
||||
ALTER TABLE metrics ADD COLUMN swap_used DECIMAL(10,2);
|
||||
ALTER TABLE metrics ADD COLUMN swap_free DECIMAL(10,2);
|
||||
ALTER TABLE metrics ADD COLUMN swap_percent DECIMAL(5,2);
|
||||
`);
|
||||
} catch (error) {
|
||||
if (error.code !== '42701') {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check and add process_count and uptime_seconds columns if they don't exist
|
||||
const checkProcessCountQuery = `
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'metrics' AND column_name = 'process_count'
|
||||
`;
|
||||
|
||||
const processCountResult = await db.pool.query(checkProcessCountQuery);
|
||||
|
||||
if (processCountResult.rows.length === 0) {
|
||||
try {
|
||||
await db.pool.query(`
|
||||
ALTER TABLE metrics ADD COLUMN process_count INTEGER;
|
||||
ALTER TABLE metrics ADD COLUMN uptime_seconds DECIMAL(10,2);
|
||||
`);
|
||||
} catch (error) {
|
||||
if (error.code !== '42701') {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check and add load_avg columns if they don't exist
|
||||
const checkLoadAvgQuery = `
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'metrics' AND column_name = 'load_avg_1min'
|
||||
`;
|
||||
|
||||
const loadAvgResult = await db.pool.query(checkLoadAvgQuery);
|
||||
|
||||
if (loadAvgResult.rows.length === 0) {
|
||||
try {
|
||||
await db.pool.query(`
|
||||
ALTER TABLE metrics ADD COLUMN load_avg_1min DECIMAL(5,2);
|
||||
ALTER TABLE metrics ADD COLUMN load_avg_5min DECIMAL(5,2);
|
||||
ALTER TABLE metrics ADD COLUMN load_avg_15min DECIMAL(5,2);
|
||||
`);
|
||||
} catch (error) {
|
||||
if (error.code !== '42701') {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if synthetic_monitors table exists
|
||||
const checkSyntheticMonitorsQuery = `
|
||||
SELECT table_name
|
||||
FROM information_schema.tables
|
||||
WHERE table_name = 'synthetic_monitors'
|
||||
`;
|
||||
|
||||
const syntheticMonitorsResult = await db.pool.query(checkSyntheticMonitorsQuery);
|
||||
|
||||
if (syntheticMonitorsResult.rows.length === 0) {
|
||||
try {
|
||||
// Create synthetic_monitors table
|
||||
await db.pool.query(`
|
||||
CREATE TABLE synthetic_monitors (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(255) NOT NULL,
|
||||
type VARCHAR(50) NOT NULL CHECK (type IN ('http_status', 'ping', 'port_check')),
|
||||
target VARCHAR(500) NOT NULL,
|
||||
expected_status INTEGER,
|
||||
port INTEGER,
|
||||
interval INTEGER NOT NULL DEFAULT 60,
|
||||
enabled BOOLEAN DEFAULT TRUE,
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
|
||||
)
|
||||
`);
|
||||
|
||||
// Create monitor_results table
|
||||
await db.pool.query(`
|
||||
CREATE TABLE monitor_results (
|
||||
id SERIAL PRIMARY KEY,
|
||||
monitor_id INTEGER NOT NULL REFERENCES synthetic_monitors(id) ON DELETE CASCADE,
|
||||
status VARCHAR(20) NOT NULL CHECK (status IN ('success', 'failed')),
|
||||
response_time INTEGER,
|
||||
message TEXT,
|
||||
timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
|
||||
)
|
||||
`);
|
||||
|
||||
// Create indexes
|
||||
await db.pool.query(`
|
||||
CREATE INDEX idx_monitor_results_monitor_id ON monitor_results(monitor_id);
|
||||
CREATE INDEX idx_monitor_results_timestamp ON monitor_results(timestamp DESC);
|
||||
CREATE INDEX idx_synthetic_monitors_enabled ON synthetic_monitors(enabled) WHERE enabled = TRUE
|
||||
`);
|
||||
|
||||
// Create trigger function for updated_at
|
||||
await db.pool.query(`
|
||||
CREATE OR REPLACE FUNCTION update_updated_at_column()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
NEW.updated_at = NOW();
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql
|
||||
`);
|
||||
|
||||
// Create trigger
|
||||
await db.pool.query(`
|
||||
CREATE TRIGGER trigger_update_synthetic_monitors_updated_at
|
||||
BEFORE UPDATE ON synthetic_monitors
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION update_updated_at_column()
|
||||
`);
|
||||
|
||||
} catch (error) {
|
||||
console.error('[Migration] Error creating synthetic monitors tables:', {
|
||||
message: error.message,
|
||||
code: error.code,
|
||||
stack: error.stack
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if alert_policies table exists
|
||||
const checkAlertPoliciesQuery = `
|
||||
SELECT table_name
|
||||
FROM information_schema.tables
|
||||
WHERE table_name = 'alert_policies'
|
||||
`;
|
||||
|
||||
const alertPoliciesResult = await db.pool.query(checkAlertPoliciesQuery);
|
||||
|
||||
if (alertPoliciesResult.rows.length === 0) {
|
||||
try {
|
||||
// Create alert_policies table
|
||||
await db.pool.query(`
|
||||
CREATE TABLE alert_policies (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(255) NOT NULL,
|
||||
type VARCHAR(50) NOT NULL CHECK (type IN ('server_metric', 'synthetic_monitor')),
|
||||
metric_type VARCHAR(50) CHECK (metric_type IN ('cpu_high', 'ram_high', 'disk_used', 'server_not_reporting', 'client_out_of_date')),
|
||||
monitor_id INTEGER REFERENCES synthetic_monitors(id) ON DELETE CASCADE,
|
||||
threshold DECIMAL(10,2),
|
||||
server_id VARCHAR(255),
|
||||
enabled BOOLEAN DEFAULT TRUE,
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
|
||||
)
|
||||
`);
|
||||
|
||||
// Create alerts table
|
||||
await db.pool.query(`
|
||||
CREATE TABLE alerts (
|
||||
id SERIAL PRIMARY KEY,
|
||||
policy_id INTEGER NOT NULL REFERENCES alert_policies(id) ON DELETE CASCADE,
|
||||
policy_name VARCHAR(255) NOT NULL,
|
||||
target VARCHAR(255),
|
||||
message TEXT NOT NULL,
|
||||
status VARCHAR(20) NOT NULL CHECK (status IN ('active', 'resolved')) DEFAULT 'active',
|
||||
triggered_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
resolved_at TIMESTAMP WITH TIME ZONE
|
||||
)
|
||||
`);
|
||||
|
||||
// Create indexes
|
||||
await db.pool.query(`
|
||||
CREATE INDEX idx_alerts_policy_id ON alerts(policy_id);
|
||||
CREATE INDEX idx_alerts_status ON alerts(status);
|
||||
CREATE INDEX idx_alerts_triggered_at ON alerts(triggered_at DESC);
|
||||
CREATE INDEX idx_alert_policies_enabled ON alert_policies(enabled) WHERE enabled = TRUE;
|
||||
CREATE INDEX idx_alert_policies_type ON alert_policies(type)
|
||||
`);
|
||||
|
||||
// Create trigger function for updated_at (if not exists)
|
||||
await db.pool.query(`
|
||||
CREATE OR REPLACE FUNCTION update_alert_policy_updated_at()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
NEW.updated_at = NOW();
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql
|
||||
`);
|
||||
|
||||
// Create trigger
|
||||
await db.pool.query(`
|
||||
CREATE TRIGGER trigger_update_alert_policies_updated_at
|
||||
BEFORE UPDATE ON alert_policies
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION update_alert_policy_updated_at()
|
||||
`);
|
||||
|
||||
} catch (error) {
|
||||
console.error('[Migration] Error creating alert policies tables:', {
|
||||
message: error.message,
|
||||
code: error.code,
|
||||
stack: error.stack
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if vulnerabilities table exists
|
||||
const checkVulnerabilitiesQuery = `
|
||||
SELECT table_name
|
||||
FROM information_schema.tables
|
||||
WHERE table_name = 'vulnerabilities'
|
||||
`;
|
||||
|
||||
const vulnerabilitiesResult = await db.pool.query(checkVulnerabilitiesQuery);
|
||||
|
||||
if (vulnerabilitiesResult.rows.length === 0) {
|
||||
try {
|
||||
// Create vulnerabilities table (vulnerability-centric)
|
||||
await db.pool.query(`
|
||||
CREATE TABLE vulnerabilities (
|
||||
id SERIAL PRIMARY KEY,
|
||||
cve_id VARCHAR(50) NOT NULL,
|
||||
package_name VARCHAR(255) NOT NULL,
|
||||
ecosystem VARCHAR(50) NOT NULL DEFAULT 'Ubuntu',
|
||||
severity VARCHAR(20),
|
||||
summary TEXT,
|
||||
description TEXT,
|
||||
fixed_version VARCHAR(255),
|
||||
affected_version_range TEXT,
|
||||
published_at TIMESTAMP WITH TIME ZONE,
|
||||
modified_at TIMESTAMP WITH TIME ZONE,
|
||||
first_detected TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
last_seen TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(cve_id, package_name, ecosystem)
|
||||
)
|
||||
`);
|
||||
|
||||
// Create server_vulnerabilities table (many-to-many)
|
||||
await db.pool.query(`
|
||||
CREATE TABLE server_vulnerabilities (
|
||||
id SERIAL PRIMARY KEY,
|
||||
vulnerability_id INTEGER NOT NULL REFERENCES vulnerabilities(id) ON DELETE CASCADE,
|
||||
server_id VARCHAR(255) NOT NULL,
|
||||
installed_version VARCHAR(255) NOT NULL,
|
||||
status VARCHAR(20) NOT NULL DEFAULT 'new' CHECK (status IN ('new', 'fixed', 'ongoing')),
|
||||
first_detected TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
last_seen TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(vulnerability_id, server_id)
|
||||
)
|
||||
`);
|
||||
|
||||
// Create vulnerability_cache table for OSV API results
|
||||
await db.pool.query(`
|
||||
CREATE TABLE vulnerability_cache (
|
||||
id SERIAL PRIMARY KEY,
|
||||
package_name VARCHAR(255) NOT NULL,
|
||||
package_version VARCHAR(255) NOT NULL,
|
||||
ecosystem VARCHAR(50) NOT NULL DEFAULT 'Ubuntu',
|
||||
vulnerabilities JSONB,
|
||||
cached_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
|
||||
UNIQUE(package_name, package_version, ecosystem)
|
||||
)
|
||||
`);
|
||||
|
||||
// Create indexes
|
||||
await db.pool.query(`
|
||||
CREATE INDEX idx_vulnerabilities_cve_id ON vulnerabilities(cve_id);
|
||||
CREATE INDEX idx_vulnerabilities_package_name ON vulnerabilities(package_name);
|
||||
CREATE INDEX idx_vulnerabilities_severity ON vulnerabilities(severity);
|
||||
CREATE INDEX idx_vulnerabilities_last_seen ON vulnerabilities(last_seen DESC);
|
||||
CREATE INDEX idx_server_vulnerabilities_server_id ON server_vulnerabilities(server_id);
|
||||
CREATE INDEX idx_server_vulnerabilities_status ON server_vulnerabilities(status);
|
||||
CREATE INDEX idx_server_vulnerabilities_vulnerability_id ON server_vulnerabilities(vulnerability_id);
|
||||
CREATE INDEX idx_vulnerability_cache_expires_at ON vulnerability_cache(expires_at);
|
||||
CREATE INDEX idx_vulnerability_cache_lookup ON vulnerability_cache(package_name, package_version, ecosystem)
|
||||
`);
|
||||
|
||||
} catch (error) {
|
||||
console.error('[Migration] Error creating vulnerabilities tables:', {
|
||||
message: error.message,
|
||||
code: error.code,
|
||||
stack: error.stack
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('[Migration] Error running migrations:', {
|
||||
message: error.message,
|
||||
code: error.code,
|
||||
stack: error.stack
|
||||
});
|
||||
// Don't throw - allow server to start even if migration fails
|
||||
// (column might already exist or there might be a permission issue)
|
||||
}
|
||||
}
|
||||
|
||||
// Expose the migration runner; invoked once during server startup.
module.exports = { runMigrations };
|
||||
|
||||
282
server/backend/src/monitorRunner.js
Normal file
282
server/backend/src/monitorRunner.js
Normal file
@@ -0,0 +1,282 @@
|
||||
/**
|
||||
* Synthetic Monitor Runner
|
||||
* Periodically executes checks for enabled synthetic monitors
|
||||
*/
|
||||
|
||||
const db = require('./db');
|
||||
const http = require('http');
|
||||
const https = require('https');
|
||||
const { promisify } = require('util');
|
||||
const dns = require('dns');
|
||||
const net = require('net');
|
||||
|
||||
const dnsLookup = promisify(dns.lookup);
|
||||
|
||||
/**
|
||||
* Execute HTTP status check
|
||||
*/
|
||||
/**
 * Run an HTTP status check against the monitor's target URL.
 *
 * Resolves (never rejects) with { status, response_time, message }:
 * 'success' when the response status equals monitor.expected_status
 * (default 200), 'failed' otherwise or on connection error / timeout.
 *
 * @param {Object} monitor - Monitor row; uses `target` and `expected_status`.
 * @returns {Promise<{status: string, response_time: number, message: string}>}
 */
async function checkHttpStatus(monitor) {
  const startedAt = Date.now();
  const { target } = monitor;
  // Pick the client matching the URL scheme.
  const transport = target.startsWith('https') ? https : http;

  return new Promise((resolve) => {
    const finish = (status, message) =>
      resolve({ status, response_time: Date.now() - startedAt, message });

    const req = transport.get(target, { timeout: 10000 }, (res) => {
      const wanted = monitor.expected_status || 200;
      const got = res.statusCode;

      // Drain the body so the socket is freed.
      res.on('data', () => {});
      res.on('end', () => {
        finish(got === wanted ? 'success' : 'failed', `HTTP ${got} (expected ${wanted})`);
      });
    });

    req.on('error', (err) => finish('failed', `Connection error: ${err.message}`));

    req.on('timeout', () => {
      req.destroy();
      finish('failed', 'Request timeout');
    });
  });
}
|
||||
|
||||
/**
|
||||
* Execute ping check (ICMP-like check using DNS lookup + TCP connection)
|
||||
*/
|
||||
/**
 * "Ping" check: resolve the host via DNS, then open a TCP connection
 * to port 80 (or 443 for https targets). This is not real ICMP.
 *
 * Resolves (never rejects) with { status, response_time, message }.
 *
 * @param {Object} monitor - Monitor row; `target` may be a bare host or URL.
 * @returns {Promise<{status: string, response_time: number, message: string}>}
 */
async function checkPing(monitor) {
  const startedAt = Date.now();
  // Strip scheme and any path so only the hostname remains.
  const host = monitor.target.replace(/^https?:\/\//, '').replace(/\/.*$/, '');

  let dnsTime;
  try {
    const dnsStart = Date.now();
    await dnsLookup(host);
    dnsTime = Date.now() - dnsStart;
  } catch (err) {
    return {
      status: 'failed',
      response_time: Date.now() - startedAt,
      message: `DNS lookup failed: ${err.message}`
    };
  }

  const port = monitor.target.startsWith('https') ? 443 : 80;
  const connectStart = Date.now();

  return new Promise((resolve) => {
    const sock = new net.Socket();
    const finish = (status, message) =>
      resolve({ status, response_time: Date.now() - startedAt, message });

    sock.setTimeout(5000);

    sock.on('connect', () => {
      sock.destroy();
      finish('success', `Host reachable (DNS: ${dnsTime}ms, TCP: ${Date.now() - connectStart}ms)`);
    });

    sock.on('error', (err) => finish('failed', `Connection failed: ${err.message}`));

    sock.on('timeout', () => {
      sock.destroy();
      finish('failed', 'Connection timeout');
    });

    sock.connect(port, host);
  });
}
|
||||
|
||||
/**
|
||||
* Execute port check
|
||||
*/
|
||||
/**
 * TCP port check: attempt a connection to monitor.target on
 * monitor.port (default 80).
 *
 * Resolves (never rejects) with { status, response_time, message }.
 *
 * @param {Object} monitor - Monitor row; uses `target` and `port`.
 * @returns {Promise<{status: string, response_time: number, message: string}>}
 */
async function checkPort(monitor) {
  const startedAt = Date.now();
  const host = monitor.target;
  const port = monitor.port || 80;

  return new Promise((resolve) => {
    const sock = new net.Socket();
    const finish = (status, message) =>
      resolve({ status, response_time: Date.now() - startedAt, message });

    sock.setTimeout(5000);

    sock.on('connect', () => {
      sock.destroy();
      finish('success', `Port ${port} is open`);
    });

    sock.on('error', (err) =>
      finish('failed', `Port ${port} is closed or unreachable: ${err.message}`));

    sock.on('timeout', () => {
      sock.destroy();
      finish('failed', `Port ${port} connection timeout`);
    });

    sock.connect(port, host);
  });
}
|
||||
|
||||
/**
|
||||
* Execute a monitor check based on its type
|
||||
*/
|
||||
/**
 * Run a single monitor's check, persist the result, and bump the
 * monitor's updated_at so the scheduler knows when it last ran.
 *
 * Never throws: execution errors are logged, recorded as a failed
 * result, and returned as { status: 'failed', message }.
 *
 * @param {Object} monitor - Monitor row (id, name, type, target, ...).
 * @returns {Promise<Object>} The check result that was saved.
 */
async function executeMonitorCheck(monitor) {
  try {
    // Dispatch table replaces the type switch; unknown types fail fast.
    const handlers = {
      http_status: checkHttpStatus,
      ping: checkPing,
      port_check: checkPort
    };
    const handler = handlers[monitor.type];

    const result = handler
      ? await handler(monitor)
      : { status: 'failed', message: `Unknown monitor type: ${monitor.type}` };

    // Persist the outcome for the history/status views.
    await db.saveMonitorResult(monitor.id, result);

    // updated_at doubles as "last checked" for shouldExecuteMonitor.
    await db.pool.query(
      'UPDATE synthetic_monitors SET updated_at = NOW() WHERE id = $1',
      [monitor.id]
    );

    return result;
  } catch (error) {
    console.error(`[Monitor Runner] Error executing check for monitor ${monitor.id}:`, {
      message: error.message,
      stack: error.stack,
      monitorId: monitor.id,
      monitorName: monitor.name,
      monitorType: monitor.type
    });
    const failure = {
      status: 'failed',
      message: `Check execution error: ${error.message}`
    };
    await db.saveMonitorResult(monitor.id, failure);
    return failure;
  }
}
|
||||
|
||||
/**
|
||||
* Check if a monitor needs to be executed based on its interval
|
||||
*/
|
||||
/**
 * Decide whether a monitor is due for another check.
 *
 * Treats updated_at as the last check time (epoch 0 when never run)
 * and compares the elapsed time against the monitor's interval
 * (seconds, default 60).
 *
 * @param {Object} monitor - Monitor row; uses `updated_at` and `interval`.
 * @returns {boolean} True when the interval has fully elapsed.
 */
function shouldExecuteMonitor(monitor) {
  const intervalMs = 1000 * (monitor.interval || 60);
  const lastRunMs = monitor.updated_at ? new Date(monitor.updated_at).getTime() : 0;
  return Date.now() - lastRunMs >= intervalMs;
}
|
||||
|
||||
/**
|
||||
* Run checks for all enabled monitors that need checking
|
||||
*/
|
||||
/**
 * One scheduler pass: fetch enabled monitors, keep those due per
 * shouldExecuteMonitor, and execute them in batches of 10 so a large
 * monitor list cannot open too many sockets at once.
 *
 * Never throws; failures are logged and the next tick retries.
 */
async function runMonitorChecks() {
  try {
    const enabled = await db.getEnabledMonitors();
    const due = enabled.filter(shouldExecuteMonitor);

    if (due.length === 0) return;

    const BATCH = 10;
    for (let start = 0; start < due.length; start += BATCH) {
      // Each batch runs in parallel; batches run sequentially.
      await Promise.all(due.slice(start, start + BATCH).map(executeMonitorCheck));
    }
  } catch (error) {
    console.error('[Monitor Runner] Error running monitor checks:', {
      message: error.message,
      stack: error.stack
    });
  }
}
|
||||
|
||||
/**
|
||||
* Start the monitor runner
|
||||
*/
|
||||
/**
 * Kick off the background scheduler: one immediate pass, then a pass
 * every 30 seconds. Individual pass errors are handled inside
 * runMonitorChecks, so the floating promise here is safe.
 */
function startMonitorRunner() {
  void runMonitorChecks(); // fire-and-forget initial sweep
  setInterval(runMonitorChecks, 30000);
}
|
||||
|
||||
// Public API: startMonitorRunner for server boot; runMonitorChecks and
// executeMonitorCheck exported so checks can be forced manually/tested.
module.exports = {
  runMonitorChecks,
  startMonitorRunner,
  executeMonitorCheck
};
|
||||
504
server/backend/src/osvClient.js
Normal file
504
server/backend/src/osvClient.js
Normal file
@@ -0,0 +1,504 @@
|
||||
/**
|
||||
* OSV (Open Source Vulnerabilities) API Client
|
||||
* Queries the OSV API for vulnerability information
|
||||
*/
|
||||
|
||||
const axios = require('axios');
|
||||
|
||||
const OSV_API_URL = 'https://api.osv.dev/v1';
|
||||
|
||||
/**
|
||||
* Query OSV API for vulnerabilities affecting a package version
|
||||
* @param {string} packageName - Package name
|
||||
* @param {string} version - Package version
|
||||
* @param {string} ecosystem - Ecosystem (e.g., 'Ubuntu', 'Debian')
|
||||
* @returns {Promise<Array>} Array of vulnerability objects
|
||||
*/
|
||||
/**
 * Query the OSV API for vulnerabilities affecting one package version.
 *
 * @param {string} packageName - Package name
 * @param {string} version - Package version
 * @param {string} ecosystem - Ecosystem (e.g., 'Ubuntu', 'Debian')
 * @returns {Promise<Array>} Vulnerability objects ([] when none found)
 * @throws Re-throws axios errors after logging them.
 */
async function queryPackage(packageName, version, ecosystem = 'Ubuntu') {
  const body = {
    package: { name: packageName, ecosystem },
    version
  };

  try {
    const response = await axios.post(`${OSV_API_URL}/query`, body, {
      timeout: 30000 // 30 second timeout
    });
    return (response.data && response.data.vulns) || [];
  } catch (error) {
    const label = `${packageName}@${version}`;
    if (error.response) {
      // The API answered with a non-2xx status.
      console.error(`[OSV Client] API error for ${label}:`, {
        status: error.response.status,
        statusText: error.response.statusText,
        data: error.response.data
      });
    } else if (error.request) {
      // The request went out but no response came back.
      console.error(`[OSV Client] Network error for ${label}:`, {
        message: error.message
      });
    } else {
      console.error(`[OSV Client] Error querying ${label}:`, {
        message: error.message
      });
    }
    throw error;
  }
}
|
||||
|
||||
/**
|
||||
* Fetch full vulnerability details by ID
|
||||
* @param {string} vulnId - OSV vulnerability ID
|
||||
* @returns {Promise<Object|null>} Full vulnerability object or null
|
||||
*/
|
||||
/**
 * Fetch full vulnerability details by OSV ID.
 *
 * @param {string} vulnId - OSV vulnerability ID
 * @returns {Promise<Object|null>} Full vulnerability object, or null on
 *   404 / any fetch error (this function never throws).
 */
async function getVulnerabilityDetails(vulnId) {
  const url = `${OSV_API_URL}/vulns/${encodeURIComponent(vulnId)}`;

  try {
    const response = await axios.get(url, { timeout: 30000 });
    return response.data || null;
  } catch (error) {
    if (error.response && error.response.status === 404) {
      console.error(`[OSV Client] Vulnerability not found: ${vulnId}`);
    } else {
      console.error(`[OSV Client] Error fetching vulnerability ${vulnId}:`, {
        message: error.message,
        status: error.response?.status
      });
    }
    // Lookup failures degrade to "no details" rather than aborting callers.
    return null;
  }
}
|
||||
|
||||
/**
|
||||
* Batch fetch full vulnerability details
|
||||
* @param {Array<string>} vulnIds - Array of vulnerability IDs
|
||||
* @returns {Promise<Array>} Array of full vulnerability objects
|
||||
*/
|
||||
/**
 * Fetch full details for many vulnerability IDs, 10 at a time, with a
 * short pause between batches to stay polite to the OSV API.
 *
 * @param {Array<string>} vulnIds - Vulnerability IDs
 * @returns {Promise<Array>} Full vulnerability objects (failed/404
 *   lookups are silently dropped).
 */
async function batchGetVulnerabilityDetails(vulnIds) {
  if (!vulnIds || vulnIds.length === 0) return [];

  const BATCH = 10;
  const details = [];

  for (let offset = 0; offset < vulnIds.length; offset += BATCH) {
    const slice = vulnIds.slice(offset, offset + BATCH);
    const fetched = await Promise.all(slice.map((id) => getVulnerabilityDetails(id)));
    for (const vuln of fetched) {
      if (vuln !== null) details.push(vuln);
    }

    // Throttle between batches (skipped after the final one).
    if (offset + BATCH < vulnIds.length) {
      await new Promise((resolve) => setTimeout(resolve, 100));
    }
  }

  return details;
}
|
||||
|
||||
/**
|
||||
* Batch query OSV API for multiple packages
|
||||
* @param {Array} packages - Array of {name, version, ecosystem} objects
|
||||
* @returns {Promise<Array>} Array of {package, vulnerabilities} objects with full vulnerability details
|
||||
*/
|
||||
/**
 * Batch query OSV API for multiple packages, then hydrate each result
 * with full vulnerability details (the /querybatch endpoint only
 * returns vulnerability IDs).
 *
 * @param {Array} packages - Array of {name, version, ecosystem} objects
 * @returns {Promise<Array>} Array of {package, vulnerabilities} objects
 *   with full vulnerability details
 * @throws Re-throws axios errors after logging them.
 */
async function queryBatch(packages) {
  if (!packages || packages.length === 0) {
    return [];
  }

  // Built outside the try block: the catch handler logs sampleQueries,
  // and `queries` was previously a const scoped inside the try, so the
  // handler threw a ReferenceError that masked the real API error.
  const queries = packages.map(pkg => ({
    package: {
      name: pkg.name,
      ecosystem: pkg.ecosystem || 'Ubuntu'
    },
    version: pkg.version
  }));

  try {
    const response = await axios.post(
      `${OSV_API_URL}/querybatch`,
      { queries: queries },
      { timeout: 60000 } // 60 second timeout for batch queries
    );

    if (!response.data || !response.data.results) {
      return [];
    }

    // Collect all unique vulnerability IDs, remembering which package
    // (by result index) each ID belongs to.
    const vulnIdSet = new Set();
    const packageVulnMap = new Map();

    response.data.results.forEach((result, index) => {
      const vulnIds = (result.vulns || []).map(v => v.id || v).filter(Boolean);
      packageVulnMap.set(index, vulnIds);
      vulnIds.forEach(id => vulnIdSet.add(id));
    });

    // Fetch full vulnerability details for all unique IDs.
    const uniqueVulnIds = Array.from(vulnIdSet);
    console.log(`[OSV Client] Fetching full details for ${uniqueVulnIds.length} unique vulnerabilities...`);
    const fullVulns = await batchGetVulnerabilityDetails(uniqueVulnIds);

    // Map of ID -> full vulnerability object.
    const vulnMap = new Map();
    fullVulns.forEach(vuln => {
      if (vuln.id) {
        vulnMap.set(vuln.id, vuln);
      }
    });

    // Re-attach the hydrated vulnerability objects to their packages;
    // IDs whose detail fetch failed are dropped.
    return response.data.results.map((result, index) => {
      const vulnIds = packageVulnMap.get(index) || [];
      const vulnerabilities = vulnIds
        .map(id => vulnMap.get(id))
        .filter(v => v !== undefined);

      return {
        package: packages[index],
        vulnerabilities: vulnerabilities
      };
    });
  } catch (error) {
    if (error.response) {
      console.error('[OSV Client] Batch API error:', {
        status: error.response.status,
        statusText: error.response.statusText,
        url: `${OSV_API_URL}/querybatch`,
        packageCount: packages.length,
        responseData: error.response.data,
        sampleQueries: queries.slice(0, 3) // Show first 3 queries for debugging
      });
    } else if (error.request) {
      console.error('[OSV Client] Batch network error:', {
        message: error.message,
        url: `${OSV_API_URL}/querybatch`,
        packageCount: packages.length
      });
    } else {
      console.error('[OSV Client] Batch query error:', {
        message: error.message,
        url: `${OSV_API_URL}/querybatch`,
        packageCount: packages.length
      });
    }
    throw error;
  }
}
|
||||
|
||||
/**
|
||||
* Parse CVSS vector string to extract numeric score
|
||||
* CVSS vectors are in format: CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H
|
||||
* We'll extract the base score by parsing the vector components
|
||||
* @param {string} vectorString - CVSS vector string
|
||||
* @returns {number|null} Numeric CVSS score or null
|
||||
*/
|
||||
/**
 * Stub CVSS-score extractor (intentionally returns null).
 *
 * A CVSS vector (e.g. CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H)
 * does not carry a numeric base score, and this module has no full
 * CVSS calculator, so every code path yields null and severity
 * resolution falls back to the Ubuntu priority (see extractSeverity).
 *
 * @param {string} vectorString - CVSS vector string
 * @returns {null} Always null, for any input.
 */
function parseCvssScore(vectorString) {
  if (typeof vectorString !== 'string' || vectorString.length === 0) {
    return null;
  }

  // Reject strings that are not CVSS vectors at all.
  if (!/CVSS:\d+\.\d+\/[^/]+/.test(vectorString)) {
    return null;
  }

  // No full CVSS calculator available — defer to Ubuntu priority.
  return null;
}
|
||||
|
||||
/**
|
||||
* Parse CVSS vector string to extract base score
|
||||
* CVSS vectors are in format: CVSS:3.1/AV:L/AC:H/PR:L/UI:N/S:C/C:H/I:H/A:H
|
||||
* We'll parse the vector components to estimate severity
|
||||
* @param {string} vectorString - CVSS vector string
|
||||
* @returns {number|null} Estimated CVSS score or null
|
||||
*/
|
||||
/**
 * Estimate a CVSS base score from a vector string.
 *
 * Vectors look like CVSS:3.1/AV:L/AC:H/PR:L/UI:N/S:C/C:H/I:H/A:H.
 * Only the impact components (C, I, A) are considered; the result is a
 * coarse bucket score (9.0 / 7.0 / 4.0 / 2.0), not a spec-accurate
 * CVSS base score. (The previous version also parsed AV/AC/PR/UI/S
 * into locals that were never used — removed.)
 *
 * @param {string} vectorString - CVSS vector string
 * @returns {number|null} Estimated score, or null for no impact /
 *   invalid input.
 */
function parseCvssVector(vectorString) {
  if (!vectorString || typeof vectorString !== 'string') {
    return null;
  }

  // Per-component impact weight: H(igh)=0.66, L(ow)=0.22, N(one)=0.
  const weightOf = (value) => (value === 'H' ? 0.66 : value === 'L' ? 0.22 : 0);

  let impactScore = 0;
  for (const comp of vectorString.split('/')) {
    // Only Confidentiality, Integrity and Availability contribute.
    // ('AV:' / 'AC:' do not match 'C:' or 'A:' with startsWith.)
    if (comp.startsWith('C:') || comp.startsWith('I:') || comp.startsWith('A:')) {
      impactScore += weightOf(comp.split(':')[1]);
    }
  }

  // Bucketed base-score approximation (simplified).
  if (impactScore >= 1.8) return 9.0; // Critical
  if (impactScore >= 1.2) return 7.0; // High
  if (impactScore >= 0.4) return 4.0; // Medium
  if (impactScore > 0) return 2.0;    // Low

  return null;
}
|
||||
|
||||
/**
|
||||
* Extract severity from OSV vulnerability object
|
||||
* @param {Object} vuln - OSV vulnerability object
|
||||
* @returns {string} Severity level (CRITICAL, HIGH, MEDIUM, LOW) or null
|
||||
*/
|
||||
/**
 * Derive a severity level (CRITICAL/HIGH/MEDIUM/LOW) from an OSV
 * vulnerability, trying in order: Ubuntu priority entries, CVSS v3/v4
 * vector estimation, then database_specific severity / cvss_score.
 *
 * @param {Object} vuln - OSV vulnerability object
 * @returns {string|null} Severity level, or null when undeterminable.
 */
function extractSeverity(vuln) {
  // Ubuntu priority word -> our level.
  const wordToLevel = (word) => {
    const w = word.toLowerCase();
    if (w === 'critical') return 'CRITICAL';
    if (w === 'high') return 'HIGH';
    if (w === 'medium') return 'MEDIUM';
    if (w === 'low' || w === 'negligible') return 'LOW';
    return null;
  };

  // Numeric CVSS score -> our level (standard CVSS buckets).
  const scoreToLevel = (score) => {
    if (score >= 9.0) return 'CRITICAL';
    if (score >= 7.0) return 'HIGH';
    if (score >= 4.0) return 'MEDIUM';
    if (score > 0) return 'LOW';
    return null;
  };

  const severities = Array.isArray(vuln.severity) ? vuln.severity : [];

  // Priority 1: Ubuntu-specific priority (most reliable for Ubuntu CVEs).
  for (const sev of severities) {
    if (sev.type === 'Ubuntu' && sev.score) {
      const level = wordToLevel(sev.score);
      if (level) return level;
    }
  }

  // Priority 2: estimate from CVSS v3/v4 vector strings.
  for (const sev of severities) {
    if (sev.type && (sev.type.startsWith('CVSS_V3') || sev.type.startsWith('CVSS_V4')) && sev.score) {
      const estimated = parseCvssVector(sev.score);
      if (estimated !== null) {
        const level = scoreToLevel(estimated);
        if (level) return level;
      }
    }
  }

  // Priority 3: database_specific severity word or numeric CVSS score.
  if (vuln.database_specific) {
    if (vuln.database_specific.severity) {
      const level = wordToLevel(vuln.database_specific.severity);
      if (level) return level;
    }
    if (vuln.database_specific.cvss_score !== undefined) {
      const level = scoreToLevel(parseFloat(vuln.database_specific.cvss_score));
      if (level) return level;
    }
  }

  return null;
}
|
||||
|
||||
/**
|
||||
* Extract CVE ID from OSV vulnerability object
|
||||
* OSV vulnerabilities have the actual CVE ID in the "upstream" array
|
||||
* @param {Object} vuln - OSV vulnerability object
|
||||
* @returns {string|null} Standard CVE ID or null
|
||||
*/
|
||||
/**
 * Pull the canonical CVE identifier out of an OSV vulnerability.
 *
 * Prefers an explicit CVE-* entry in the `upstream` array; otherwise
 * derives it from the OSV ID (UBUNTU-CVE-x -> CVE-x). Non-CVE IDs
 * (e.g. GHSA-*) are returned unchanged.
 *
 * @param {Object} vuln - OSV vulnerability object
 * @returns {string|null} CVE ID, another scheme's ID as-is, or null
 *   when no vulnerability object was given.
 */
function extractCveId(vuln) {
  if (!vuln) return null;

  // Most reliable source: the upstream cross-reference list.
  if (Array.isArray(vuln.upstream)) {
    const upstreamCve = vuln.upstream.find(id => id.startsWith('CVE-'));
    if (upstreamCve) return upstreamCve;
  }

  const osvId = vuln.id || '';

  // Ubuntu-specific format: UBUNTU-CVE-2022-49737 -> CVE-2022-49737.
  if (osvId.startsWith('UBUNTU-CVE-')) {
    return osvId.slice('UBUNTU-'.length);
  }

  // Already a standard CVE, or some other scheme (GHSA, ...): as-is.
  return osvId;
}
|
||||
|
||||
/**
|
||||
* Normalize OSV vulnerability to our format
|
||||
* @param {Object} vuln - OSV vulnerability object
|
||||
* @param {string} packageName - Package name
|
||||
* @param {string} ecosystem - Ecosystem
|
||||
* @param {string} installedVersion - Installed package version (optional)
|
||||
* @returns {Object} Normalized vulnerability object
|
||||
*/
|
||||
/**
 * Convert an OSV vulnerability into the row shape stored in the
 * `vulnerabilities` table.
 *
 * @param {Object} vuln - OSV vulnerability object
 * @param {string} packageName - Package name
 * @param {string} ecosystem - Ecosystem
 * @param {string} installedVersion - Installed package version (optional)
 * @returns {Object} Normalized vulnerability record
 */
function normalizeVulnerability(vuln, packageName, ecosystem, installedVersion = null) {
  // Timestamps arrive as ISO strings; absent ones become null.
  const toDate = (value) => (value ? new Date(value) : null);

  return {
    // Canonical CVE ID (resolved via the upstream array when present).
    cve_id: extractCveId(vuln),
    // Original OSV identifier kept for reference.
    osv_id: vuln.id,
    package_name: packageName,
    ecosystem: ecosystem,
    severity: extractSeverity(vuln),
    summary: vuln.summary || null,
    description: vuln.details || vuln.summary || null,
    fixed_version: extractFixedVersion(vuln, ecosystem, installedVersion),
    affected_version_range: JSON.stringify(vuln.affected || []),
    published_at: toDate(vuln.published),
    modified_at: toDate(vuln.modified)
  };
}
|
||||
|
||||
/**
|
||||
* Extract fixed version from OSV vulnerability
|
||||
* @param {Object} vuln - OSV vulnerability object
|
||||
* @param {string} ecosystem - Ecosystem (e.g., 'Ubuntu:24.04:LTS')
|
||||
* @param {string} installedVersion - Installed package version
|
||||
* @returns {string|null} Fixed version or null
|
||||
*/
|
||||
function extractFixedVersion(vuln, ecosystem = null, installedVersion = null) {
  // No affected entries means nothing to derive a fix from.
  if (!vuln.affected || vuln.affected.length === 0) {
    return null;
  }

  // Find the affected entry that matches our ecosystem.
  // An exact ecosystem match wins immediately (break); a base-prefix
  // match (e.g. "Ubuntu" vs "Ubuntu:24.04:LTS") is kept only as a
  // fallback and can still be superseded by a later exact match.
  let matchingAffected = null;
  for (const affected of vuln.affected) {
    if (affected.package && affected.package.ecosystem) {
      const affectedEco = affected.package.ecosystem;
      // Match ecosystem (e.g., "Ubuntu:24.04:LTS")
      if (ecosystem && affectedEco === ecosystem) {
        matchingAffected = affected;
        break;
      }
      // Also check if ecosystem starts with our base (for fallback)
      if (ecosystem && affectedEco.startsWith(ecosystem.split(':')[0])) {
        if (!matchingAffected) {
          matchingAffected = affected;
        }
      }
    }
  }

  // If no exact match, use first affected entry
  if (!matchingAffected && vuln.affected.length > 0) {
    matchingAffected = vuln.affected[0];
  }

  if (!matchingAffected) {
    return null;
  }

  // Check ranges for fixed events: the LAST "fixed" event in an
  // ECOSYSTEM range wins (assumed to be the most recent fixed version).
  if (matchingAffected.ranges && matchingAffected.ranges.length > 0) {
    for (const range of matchingAffected.ranges) {
      if (range.type === 'ECOSYSTEM' && range.events) {
        // Look for fixed events (most recent fixed version)
        let fixedVersion = null;
        for (const event of range.events) {
          if (event.fixed) {
            fixedVersion = event.fixed;
          }
        }
        if (fixedVersion) {
          return fixedVersion;
        }
      }
    }
  }

  // For Ubuntu, check if installed version is in the affected versions list
  // If it is, and there are newer versions, the newest version might be the fix
  // NOTE(review): this whole branch is intentionally inert — lastVersion is
  // computed but never returned pending a proper version comparator.
  if (matchingAffected.versions && Array.isArray(matchingAffected.versions) && installedVersion) {
    const versions = matchingAffected.versions;
    const installedIndex = versions.indexOf(installedVersion);

    // If installed version is in the list and not the last one, return the last version as potential fix
    // Note: This is a heuristic - Ubuntu might not have fixed it yet
    if (installedIndex >= 0 && installedIndex < versions.length - 1) {
      // Check if there's a newer version (potential fix)
      const lastVersion = versions[versions.length - 1];
      // Only return if it's clearly newer (simple string comparison might not work for all version formats)
      // For now, return null - we need better version comparison
      // return lastVersion;
    }
  }

  // Check database_specific for Ubuntu-specific fixed version info
  // (affected-entry level first, then vulnerability level).
  if (matchingAffected.database_specific && matchingAffected.database_specific.fixed_version) {
    return matchingAffected.database_specific.fixed_version;
  }

  if (vuln.database_specific && vuln.database_specific.fixed_version) {
    return vuln.database_specific.fixed_version;
  }

  return null;
}
|
||||
|
||||
// Public API of the OSV client; the CVSS parsing helpers and
// extractFixedVersion remain module-internal.
module.exports = {
  queryPackage,
  queryBatch,
  getVulnerabilityDetails,
  batchGetVulnerabilityDetails,
  normalizeVulnerability,
  extractSeverity,
  extractCveId
};
|
||||
90
server/backend/src/packageBuilder.js
Normal file
90
server/backend/src/packageBuilder.js
Normal file
@@ -0,0 +1,90 @@
|
||||
const { exec } = require('child_process');
|
||||
const fs = require('fs').promises;
|
||||
const path = require('path');
|
||||
const { promisify } = require('util');
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
/**
|
||||
* Build a debian package with embedded configuration
|
||||
*/
|
||||
/**
 * Build a Debian package for the Ubuntu client with the server
 * connection details baked in as an embedded client.conf.
 *
 * @param {string} serverUrl - Base URL the client should report to
 * @param {string} serverId - Server identifier embedded in the config
 * @param {string} apiKey - API key embedded in the config
 * @param {string} outputDir - Directory to build the package under
 * @returns {Promise<string>} Path of the generated .deb file
 * @throws When copying, dpkg-buildpackage, or artifact lookup fails
 */
async function buildDebPackage(serverUrl, serverId, apiKey, outputDir) {
  const clientDir = path.join(__dirname, '../../../../clients/ubuntu');
  const buildDir = path.join(outputDir, 'build');
  const packageDir = path.join(buildDir, 'oculog-client');

  try {
    // Create build directory tree.
    await fs.mkdir(buildDir, { recursive: true });
    await fs.mkdir(packageDir, { recursive: true });

    // Copy client files. Directory paths are double-quoted so paths
    // containing spaces don't split into separate shell words; the
    // glob stays outside the quotes so it still expands.
    await execAsync(`cp -r "${clientDir}"/* "${packageDir}/"`, { cwd: clientDir });

    // Create embedded config file.
    const config = {
      server_url: serverUrl,
      server_id: serverId,
      api_key: apiKey,
      interval: 30
    };

    const configPath = path.join(packageDir, 'client.conf');
    await fs.writeFile(configPath, JSON.stringify(config, null, 2));

    // Update postinst to copy the embedded config
    const postinstPath = path.join(packageDir, 'debian/postinst');
    let postinst = await fs.readFile(postinstPath, 'utf8');
    postinst = postinst.replace(
      'echo "Please configure /etc/oculog/client.conf before starting the service."',
      `# Copy embedded configuration
if [ -f /opt/oculog/client.conf ]; then
cp /opt/oculog/client.conf /etc/oculog/client.conf
chmod 600 /etc/oculog/client.conf
echo "Configuration installed from package."
fi

# Enable and start service
systemctl daemon-reload
systemctl enable oculog-client.service 2>/dev/null || true
systemctl start oculog-client.service 2>/dev/null || true

echo "Oculog client installed and started successfully!"`
    );
    await fs.writeFile(postinstPath, postinst);

    // Update debian/rules to include config file (idempotent: only if
    // the rules file does not already install client.conf).
    const rulesPath = path.join(packageDir, 'debian/rules');
    let rules = await fs.readFile(rulesPath, 'utf8');
    if (!rules.includes('client.conf')) {
      rules = rules.replace(
        'install -m 644 oculog-client.service',
        `install -m 644 client.conf $(CURDIR)/debian/oculog-client/opt/oculog/
\tinstall -m 644 oculog-client.service`
      );
    }
    await fs.writeFile(rulesPath, rules);

    // Build the package (binary-only, unsigned). The previous version
    // destructured stdout/stderr and never used them.
    await execAsync('dpkg-buildpackage -b -us -uc', { cwd: packageDir });

    // Find the generated .deb file.
    const debFiles = await fs.readdir(buildDir);
    const debFile = debFiles.find(f => f.endsWith('.deb'));

    if (!debFile) {
      throw new Error('Failed to build debian package');
    }

    return path.join(buildDir, debFile);
  } catch (error) {
    console.error('Error building deb package:', error);
    throw error;
  }
}
|
||||
|
||||
// Export the package builder for the installer/download routes.
module.exports = {
  buildDebPackage
};
|
||||
Reference in New Issue
Block a user