From c5f62a652d09d6e17149eb6a53a11ef02ee6f162 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Fri, 27 Mar 2026 11:09:45 +0200 Subject: [PATCH 01/20] Add throughput forecasting APIs and types Introduced new APIs for throughput forecasting, including endpoints for retrieving forecasts, accessing settings, and updating settings. Added corresponding types and a comprehensive test suite for the `ThroughputForecastingService`. Enhanced `WebhookForm` event options to support throughput limit alerts. --- apps/api/src/app.module.ts | 2 + .../interfaces/storage-port.interface.ts | 194 +++-- .../connection-registry.service.ts | 1 + apps/api/src/prometheus/prometheus.module.ts | 2 + apps/api/src/prometheus/prometheus.service.ts | 566 +++++++++---- apps/api/src/settings/settings.service.ts | 25 +- .../src/storage/adapters/base-sql.adapter.ts | 3 + .../src/storage/adapters/memory.adapter.ts | 521 ++++++++---- .../src/storage/adapters/postgres.adapter.ts | 674 +++++++++++----- .../src/storage/adapters/sqlite.adapter.ts | 750 ++++++++++++++---- .../throughput-forecasting.service.spec.ts | 554 +++++++++++++ .../throughput-forecasting.controller.ts | 27 + .../throughput-forecasting.module.ts | 13 + .../throughput-forecasting.service.ts | 340 ++++++++ apps/web/src/App.tsx | 15 + apps/web/src/api/metrics.ts | 15 + .../src/components/webhooks/WebhookForm.tsx | 1 + apps/web/src/pages/Settings.tsx | 58 +- apps/web/src/pages/ThroughputForecasting.tsx | 362 +++++++++ apps/web/src/types/throughput.ts | 32 + packages/shared/src/index.ts | 1 + packages/shared/src/types/settings.types.ts | 4 + packages/shared/src/types/throughput.types.ts | 32 + packages/shared/src/webhooks/types.ts | 14 + .../__tests__/throughput-limit.spec.ts | 81 ++ .../webhook-pro/webhook-events-pro.service.ts | 38 + 26 files changed, 3594 insertions(+), 731 deletions(-) create mode 100644 apps/api/src/throughput-forecasting/__tests__/throughput-forecasting.service.spec.ts create mode 100644 
apps/api/src/throughput-forecasting/throughput-forecasting.controller.ts create mode 100644 apps/api/src/throughput-forecasting/throughput-forecasting.module.ts create mode 100644 apps/api/src/throughput-forecasting/throughput-forecasting.service.ts create mode 100644 apps/web/src/pages/ThroughputForecasting.tsx create mode 100644 apps/web/src/types/throughput.ts create mode 100644 packages/shared/src/types/throughput.types.ts create mode 100644 proprietary/webhook-pro/__tests__/throughput-limit.spec.ts diff --git a/apps/api/src/app.module.ts b/apps/api/src/app.module.ts index e63e8d3a..6b2bbfd5 100644 --- a/apps/api/src/app.module.ts +++ b/apps/api/src/app.module.ts @@ -18,6 +18,7 @@ import { TelemetryModule } from './telemetry/telemetry.module'; import { VectorSearchModule } from './vector-search/vector-search.module'; import { CloudAuthModule } from './auth/cloud-auth.module'; import { McpModule } from './mcp/mcp.module'; +import { ThroughputForecastingModule } from './throughput-forecasting/throughput-forecasting.module'; let AiModule: any = null; let LicenseModule: any = null; @@ -119,6 +120,7 @@ const baseImports = [ WebhooksModule, McpModule, VectorSearchModule, + ThroughputForecastingModule, ]; const proprietaryImports = [ diff --git a/apps/api/src/common/interfaces/storage-port.interface.ts b/apps/api/src/common/interfaces/storage-port.interface.ts index 7a7a60fe..06e38842 100644 --- a/apps/api/src/common/interfaces/storage-port.interface.ts +++ b/apps/api/src/common/interfaces/storage-port.interface.ts @@ -29,36 +29,29 @@ export type { VectorIndexSnapshot, VectorIndexSnapshotQueryOptions, } from '@betterdb/shared'; -import type { StoredAclEntry, AuditQueryOptions, AuditStats } from '@betterdb/shared'; +export type { ThroughputSettings } from '@betterdb/shared'; import type { - StoredClientSnapshot, + AppSettings, + AuditQueryOptions, + AuditStats, + ClientAnalyticsStats, ClientSnapshotQueryOptions, ClientTimeSeriesPoint, - ClientAnalyticsStats, - 
CommandDistributionParams, - CommandDistributionResponse, - IdleConnectionsParams, - IdleConnectionsResponse, - BufferAnomaliesParams, - BufferAnomaliesResponse, - ActivityTimelineParams, - ActivityTimelineResponse, - SpikeDetectionParams, - SpikeDetectionResponse, - AppSettings, - SettingsUpdateRequest, - KeyPatternSnapshot, - KeyPatternQueryOptions, - KeyAnalyticsSummary, + DatabaseConnectionConfig, HotKeyEntry, HotKeyQueryOptions, + KeyAnalyticsSummary, + KeyPatternQueryOptions, + KeyPatternSnapshot, + SettingsUpdateRequest, + StoredAclEntry, + StoredClientSnapshot, + ThroughputSettings, + VectorIndexSnapshot, + VectorIndexSnapshotQueryOptions, Webhook, WebhookDelivery, WebhookEventType, - DeliveryStatus, - DatabaseConnectionConfig, - VectorIndexSnapshot, - VectorIndexSnapshotQueryOptions, } from '@betterdb/shared'; // Anomaly Event Types @@ -120,24 +113,24 @@ export interface AnomalyStats { // Slow Log Entry Types export interface StoredSlowLogEntry { - id: number; // Original slowlog ID from Valkey/Redis - timestamp: number; // Unix timestamp in seconds - duration: number; // Microseconds - command: string[]; // Command name + args (e.g., ['GET', 'key1']) + id: number; // Original slowlog ID from Valkey/Redis + timestamp: number; // Unix timestamp in seconds + duration: number; // Microseconds + command: string[]; // Command name + args (e.g., ['GET', 'key1']) clientAddress: string; clientName: string; - capturedAt: number; // When we captured this entry (ms) + capturedAt: number; // When we captured this entry (ms) sourceHost: string; sourcePort: number; connectionId?: string; } export interface SlowLogQueryOptions { - startTime?: number; // Unix timestamp in seconds + startTime?: number; // Unix timestamp in seconds endTime?: number; command?: string; clientName?: string; - minDuration?: number; // Microseconds + minDuration?: number; // Microseconds limit?: number; offset?: number; connectionId?: string; @@ -147,26 +140,26 @@ export interface 
SlowLogQueryOptions { export type CommandLogType = 'slow' | 'large-request' | 'large-reply'; export interface StoredCommandLogEntry { - id: number; // Original commandlog ID from Valkey - timestamp: number; // Unix timestamp in seconds - duration: number; // Microseconds - command: string[]; // Command name + args + id: number; // Original commandlog ID from Valkey + timestamp: number; // Unix timestamp in seconds + duration: number; // Microseconds + command: string[]; // Command name + args clientAddress: string; clientName: string; - type: CommandLogType; // slow, large-request, or large-reply - capturedAt: number; // When we captured this entry (ms) + type: CommandLogType; // slow, large-request, or large-reply + capturedAt: number; // When we captured this entry (ms) sourceHost: string; sourcePort: number; connectionId?: string; } export interface CommandLogQueryOptions { - startTime?: number; // Unix timestamp in seconds + startTime?: number; // Unix timestamp in seconds endTime?: number; command?: string; clientName?: string; type?: CommandLogType; - minDuration?: number; // Microseconds + minDuration?: number; // Microseconds limit?: number; offset?: number; connectionId?: string; @@ -174,11 +167,11 @@ export interface CommandLogQueryOptions { // Latency Snapshot Types export interface StoredLatencySnapshot { - id: string; // UUID - timestamp: number; // When we captured this snapshot (ms) + id: string; // UUID + timestamp: number; // When we captured this snapshot (ms) eventName: string; - latestEventTimestamp: number; // Unix timestamp from LATENCY LATEST - maxLatency: number; // Microseconds + latestEventTimestamp: number; // Unix timestamp from LATENCY LATEST + maxLatency: number; // Microseconds connectionId?: string; } @@ -199,8 +192,8 @@ export interface StoredLatencyHistogram { // Memory Snapshot Types export interface StoredMemorySnapshot { - id: string; // UUID - timestamp: number; // When we captured this snapshot (ms) + id: string; // UUID + 
timestamp: number; // When we captured this snapshot (ms) usedMemory: number; usedMemoryRss: number; usedMemoryPeak: number; @@ -272,16 +265,34 @@ export interface StoragePort { // Client Analytics Methods - connectionId required for writes, optional filter for reads saveClientSnapshot(clients: StoredClientSnapshot[], connectionId: string): Promise; getClientSnapshots(options?: ClientSnapshotQueryOptions): Promise; - getClientTimeSeries(startTime: number, endTime: number, bucketSizeMs?: number, connectionId?: string): Promise; - getClientAnalyticsStats(startTime?: number, endTime?: number, connectionId?: string): Promise; - getClientConnectionHistory(identifier: { name?: string; user?: string; addr?: string }, startTime?: number, endTime?: number, connectionId?: string): Promise; + getClientTimeSeries( + startTime: number, + endTime: number, + bucketSizeMs?: number, + connectionId?: string, + ): Promise; + getClientAnalyticsStats( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise; + getClientConnectionHistory( + identifier: { name?: string; user?: string; addr?: string }, + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise; pruneOldClientSnapshots(olderThanTimestamp: number, connectionId?: string): Promise; // Anomaly Methods - connectionId required for writes, optional filter for reads saveAnomalyEvent(event: StoredAnomalyEvent, connectionId: string): Promise; saveAnomalyEvents(events: StoredAnomalyEvent[], connectionId: string): Promise; getAnomalyEvents(options?: AnomalyQueryOptions): Promise; - getAnomalyStats(startTime?: number, endTime?: number, connectionId?: string): Promise; + getAnomalyStats( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise; resolveAnomaly(id: string, resolvedAt: number): Promise; pruneOldAnomalyEvents(cutoffTimestamp: number, connectionId?: string): Promise; @@ -292,13 +303,24 @@ export interface StoragePort { // Key Analytics Methods - connectionId 
required for writes, optional filter for reads saveKeyPatternSnapshots(snapshots: KeyPatternSnapshot[], connectionId: string): Promise; getKeyPatternSnapshots(options?: KeyPatternQueryOptions): Promise; - getKeyAnalyticsSummary(startTime?: number, endTime?: number, connectionId?: string): Promise; - getKeyPatternTrends(pattern: string, startTime: number, endTime: number, connectionId?: string): Promise>; + getKeyAnalyticsSummary( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise; + getKeyPatternTrends( + pattern: string, + startTime: number, + endTime: number, + connectionId?: string, + ): Promise< + Array<{ + timestamp: number; + keyCount: number; + memoryBytes: number; + staleCount: number; + }> + >; pruneOldKeyPatternSnapshots(cutoffTimestamp: number, connectionId?: string): Promise; // Hot Key Stats Methods - connectionId required for writes, optional filter for reads @@ -316,14 +338,24 @@ export interface StoragePort { getWebhook(id: string): Promise; getWebhooksByInstance(connectionId?: string): Promise; getWebhooksByEvent(event: WebhookEventType, connectionId?: string): Promise; - updateWebhook(id: string, updates: Partial>): Promise; + updateWebhook( + id: string, + updates: Partial>, + ): Promise; deleteWebhook(id: string): Promise; // Webhook Delivery Methods - connectionId optional filter createDelivery(delivery: Omit): Promise; getDelivery(id: string): Promise; - getDeliveriesByWebhook(webhookId: string, limit?: number, offset?: number): Promise; - updateDelivery(id: string, updates: Partial>): Promise; + getDeliveriesByWebhook( + webhookId: string, + limit?: number, + offset?: number, + ): Promise; + updateDelivery( + id: string, + updates: Partial>, + ): Promise; getRetriableDeliveries(limit?: number, connectionId?: string): Promise; pruneOldDeliveries(cutoffTimestamp: number, connectionId?: string): Promise; @@ -346,7 +378,12 @@ export interface StoragePort { // Latency Histogram Methods saveLatencyHistogram(histogram: 
StoredLatencyHistogram, connectionId: string): Promise; - getLatencyHistograms(options?: { connectionId?: string; startTime?: number; endTime?: number; limit?: number }): Promise; + getLatencyHistograms(options?: { + connectionId?: string; + startTime?: number; + endTime?: number; + limit?: number; + }): Promise; pruneOldLatencyHistograms(cutoffTimestamp: number, connectionId?: string): Promise; // Memory Snapshot Methods - connectionId required for writes, optional filter for reads @@ -367,9 +404,44 @@ export interface StoragePort { updateConnection(id: string, updates: Partial): Promise; // Agent/MCP Token Methods (cloud-only, optional — implementations may no-op) - saveAgentToken(token: { id: string; name: string; type: 'agent' | 'mcp'; tokenHash: string; createdAt: number; expiresAt: number; revokedAt: number | null; lastUsedAt: number | null }): Promise; - getAgentTokens(type?: 'agent' | 'mcp'): Promise>; - getAgentTokenByHash(hash: string): Promise<{ id: string; name: string; type: 'agent' | 'mcp'; tokenHash: string; createdAt: number; expiresAt: number; revokedAt: number | null; lastUsedAt: number | null } | null>; + saveAgentToken(token: { + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + }): Promise; + getAgentTokens(type?: 'agent' | 'mcp'): Promise< + Array<{ + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + }> + >; + getAgentTokenByHash(hash: string): Promise<{ + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + } | null>; revokeAgentToken(id: string): Promise; updateAgentTokenLastUsed(id: string): Promise; + + // Throughput Forecasting Settings + getThroughputSettings(connectionId: 
string): Promise; + saveThroughputSettings(settings: ThroughputSettings): Promise; + deleteThroughputSettings(connectionId: string): Promise; + getActiveThroughputSettings(): Promise; } diff --git a/apps/api/src/connections/connection-registry.service.ts b/apps/api/src/connections/connection-registry.service.ts index e7f775a2..3824bcdb 100644 --- a/apps/api/src/connections/connection-registry.service.ts +++ b/apps/api/src/connections/connection-registry.service.ts @@ -9,6 +9,7 @@ import { EnvelopeEncryptionService, getEncryptionService } from '../common/utils import { RuntimeCapabilityTracker } from './runtime-capability-tracker.service'; import { UsageTelemetryService } from '../telemetry/usage-telemetry.service'; +// TODO: Export and use across the codebase instead of hardcoded 'env-default' strings const ENV_DEFAULT_ID = 'env-default'; @Injectable() diff --git a/apps/api/src/prometheus/prometheus.module.ts b/apps/api/src/prometheus/prometheus.module.ts index 4bd0c89b..1dfc8675 100644 --- a/apps/api/src/prometheus/prometheus.module.ts +++ b/apps/api/src/prometheus/prometheus.module.ts @@ -6,6 +6,7 @@ import { WebhooksModule } from '../webhooks/webhooks.module'; import { SlowLogAnalyticsModule } from '../slowlog-analytics/slowlog-analytics.module'; import { CommandLogAnalyticsModule } from '../commandlog-analytics/commandlog-analytics.module'; import { HealthModule } from '../health/health.module'; +import { ThroughputForecastingModule } from '../throughput-forecasting/throughput-forecasting.module'; @Module({ imports: [ @@ -14,6 +15,7 @@ import { HealthModule } from '../health/health.module'; SlowLogAnalyticsModule, CommandLogAnalyticsModule, forwardRef(() => HealthModule), + ThroughputForecastingModule, ], controllers: [PrometheusController], providers: [PrometheusService], diff --git a/apps/api/src/prometheus/prometheus.service.ts b/apps/api/src/prometheus/prometheus.service.ts index 24827c78..18bf2786 100644 --- a/apps/api/src/prometheus/prometheus.service.ts 
+++ b/apps/api/src/prometheus/prometheus.service.ts @@ -16,7 +16,12 @@ import { SlowLogAnalyticsService } from '../slowlog-analytics/slowlog-analytics. import { CommandLogAnalyticsService } from '../commandlog-analytics/commandlog-analytics.service'; import { HealthService } from '../health/health.service'; import { DatabasePort } from '../common/interfaces/database-port.interface'; -import { MultiConnectionPoller, ConnectionContext } from '../common/services/multi-connection-poller'; +import { InfoResponse } from '../common/types/metrics.types'; +import { + MultiConnectionPoller, + ConnectionContext, +} from '../common/services/multi-connection-poller'; +import { ThroughputForecastingService } from '../throughput-forecasting/throughput-forecasting.service'; // Per-connection state for tracking previous values and stale labels interface ConnectionMetricState { @@ -40,7 +45,7 @@ interface ConnectionMetricState { @Injectable() export class PrometheusService extends MultiConnectionPoller implements OnModuleInit { protected readonly logger = new Logger(PrometheusService.name); - private registry: Registry; + private readonly registry: Registry; private readonly pollIntervalMs: number; // Per-connection state tracking @@ -150,6 +155,9 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule private anomalyDetectionBufferMean: Gauge; private anomalyDetectionBufferStdDev: Gauge; + // Throughput Forecasting Metrics + private throughputTimeToLimitSeconds: Gauge; + constructor( @Inject('STORAGE_CLIENT') private storage: StoragePort, connectionRegistry: ConnectionRegistry, @@ -159,8 +167,14 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule private readonly commandLogAnalytics: CommandLogAnalyticsService, @Inject(forwardRef(() => HealthService)) private readonly healthService: HealthService, @Optional() private readonly webhookDispatcher?: WebhookDispatcherService, - @Optional() @Inject(WEBHOOK_EVENTS_PRO_SERVICE) 
private readonly webhookEventsProService?: IWebhookEventsProService, - @Optional() @Inject(WEBHOOK_EVENTS_ENTERPRISE_SERVICE) private readonly webhookEventsEnterpriseService?: IWebhookEventsEnterpriseService, + @Optional() + @Inject(WEBHOOK_EVENTS_PRO_SERVICE) + private readonly webhookEventsProService?: IWebhookEventsProService, + @Optional() + @Inject(WEBHOOK_EVENTS_ENTERPRISE_SERVICE) + private readonly webhookEventsEnterpriseService?: IWebhookEventsEnterpriseService, + @Optional() + private readonly throughputForecastingService?: ThroughputForecastingService, ) { super(connectionRegistry); this.pollIntervalMs = this.configService.get('PROMETHEUS_POLL_INTERVAL_MS', 5000); @@ -210,7 +224,7 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule } /** - * Get or create per-connection metric state + * Get or create a per-connection metric state */ private getConnectionState(connectionId: string): ConnectionMetricState { if (!this.perConnectionState.has(connectionId)) { @@ -242,7 +256,7 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule } /** - * Create a Gauge with connection label always included + * Create a Gauge with a connection label always included */ private createGauge(name: string, help: string, additionalLabels?: string[]): Gauge { return new Gauge({ @@ -256,49 +270,135 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule private initializeMetrics(): void { // ACL Audit (storage-based, per-connection) this.aclDeniedTotal = this.createGauge('acl_denied', 'Total ACL denied events captured'); - this.aclDeniedByReason = this.createGauge('acl_denied_by_reason', 'ACL denied events by reason', ['reason']); - this.aclDeniedByUser = this.createGauge('acl_denied_by_user', 'ACL denied events by username', ['username']); + this.aclDeniedByReason = this.createGauge( + 'acl_denied_by_reason', + 'ACL denied events by reason', + ['reason'], + ); + this.aclDeniedByUser = 
this.createGauge('acl_denied_by_user', 'ACL denied events by username', [ + 'username', + ]); // Client Analytics (storage-based, per-connection) - this.clientConnectionsCurrent = this.createGauge('client_connections_current', 'Current number of client connections'); - this.clientConnectionsByName = this.createGauge('client_connections_by_name', 'Current connections by client name', ['client_name']); - this.clientConnectionsByUser = this.createGauge('client_connections_by_user', 'Current connections by ACL user', ['user']); - this.clientConnectionsPeak = this.createGauge('client_connections_peak', 'Peak connections in retention period'); + this.clientConnectionsCurrent = this.createGauge( + 'client_connections_current', + 'Current number of client connections', + ); + this.clientConnectionsByName = this.createGauge( + 'client_connections_by_name', + 'Current connections by client name', + ['client_name'], + ); + this.clientConnectionsByUser = this.createGauge( + 'client_connections_by_user', + 'Current connections by ACL user', + ['user'], + ); + this.clientConnectionsPeak = this.createGauge( + 'client_connections_peak', + 'Peak connections in retention period', + ); // Slowlog Patterns (storage-based, per-connection) - this.slowlogPatternCount = this.createGauge('slowlog_pattern_count', 'Number of slow queries per pattern', ['pattern']); - this.slowlogPatternDuration = this.createGauge('slowlog_pattern_avg_duration_us', 'Average duration in microseconds per pattern', ['pattern']); - this.slowlogPatternPercentage = this.createGauge('slowlog_pattern_percentage', 'Percentage of slow queries per pattern', ['pattern']); + this.slowlogPatternCount = this.createGauge( + 'slowlog_pattern_count', + 'Number of slow queries per pattern', + ['pattern'], + ); + this.slowlogPatternDuration = this.createGauge( + 'slowlog_pattern_avg_duration_us', + 'Average duration in microseconds per pattern', + ['pattern'], + ); + this.slowlogPatternPercentage = this.createGauge( + 
'slowlog_pattern_percentage', + 'Percentage of slow queries per pattern', + ['pattern'], + ); // COMMANDLOG (Valkey 8.1+) - storage-based, per-connection - this.commandlogLargeRequestCount = this.createGauge('commandlog_large_request', 'Total large request entries'); - this.commandlogLargeReplyCount = this.createGauge('commandlog_large_reply', 'Total large reply entries'); - this.commandlogLargeRequestByPattern = this.createGauge('commandlog_large_request_by_pattern', 'Large request count by command pattern', ['pattern']); - this.commandlogLargeReplyByPattern = this.createGauge('commandlog_large_reply_by_pattern', 'Large reply count by command pattern', ['pattern']); + this.commandlogLargeRequestCount = this.createGauge( + 'commandlog_large_request', + 'Total large request entries', + ); + this.commandlogLargeReplyCount = this.createGauge( + 'commandlog_large_reply', + 'Total large reply entries', + ); + this.commandlogLargeRequestByPattern = this.createGauge( + 'commandlog_large_request_by_pattern', + 'Large request count by command pattern', + ['pattern'], + ); + this.commandlogLargeReplyByPattern = this.createGauge( + 'commandlog_large_reply_by_pattern', + 'Large reply count by command pattern', + ['pattern'], + ); // Standard INFO - Server (per connection) this.uptimeInSeconds = this.createGauge('uptime_in_seconds', 'Server uptime in seconds'); - this.instanceInfo = this.createGauge('instance_info', 'Instance information (always 1)', ['version', 'role', 'os']); + this.instanceInfo = this.createGauge('instance_info', 'Instance information (always 1)', [ + 'version', + 'role', + 'os', + ]); // Standard INFO - Clients (per connection) this.connectedClients = this.createGauge('connected_clients', 'Number of client connections'); - this.blockedClients = this.createGauge('blocked_clients', 'Clients blocked on BLPOP, BRPOP, etc'); - this.trackingClients = this.createGauge('tracking_clients', 'Clients being tracked for client-side caching'); + this.blockedClients = 
this.createGauge( + 'blocked_clients', + 'Clients blocked on BLPOP, BRPOP, etc', + ); + this.trackingClients = this.createGauge( + 'tracking_clients', + 'Clients being tracked for client-side caching', + ); // Standard INFO - Memory (per connection) this.memoryUsedBytes = this.createGauge('memory_used_bytes', 'Total allocated memory in bytes'); - this.memoryUsedRssBytes = this.createGauge('memory_used_rss_bytes', 'RSS memory usage in bytes'); - this.memoryUsedPeakBytes = this.createGauge('memory_used_peak_bytes', 'Peak memory usage in bytes'); - this.memoryMaxBytes = this.createGauge('memory_max_bytes', 'Maximum memory limit in bytes (0 if unlimited)'); - this.memoryFragmentationRatio = this.createGauge('memory_fragmentation_ratio', 'Memory fragmentation ratio'); - this.memoryFragmentationBytes = this.createGauge('memory_fragmentation_bytes', 'Memory fragmentation in bytes'); + this.memoryUsedRssBytes = this.createGauge( + 'memory_used_rss_bytes', + 'RSS memory usage in bytes', + ); + this.memoryUsedPeakBytes = this.createGauge( + 'memory_used_peak_bytes', + 'Peak memory usage in bytes', + ); + this.memoryMaxBytes = this.createGauge( + 'memory_max_bytes', + 'Maximum memory limit in bytes (0 if unlimited)', + ); + this.memoryFragmentationRatio = this.createGauge( + 'memory_fragmentation_ratio', + 'Memory fragmentation ratio', + ); + this.memoryFragmentationBytes = this.createGauge( + 'memory_fragmentation_bytes', + 'Memory fragmentation in bytes', + ); // Standard INFO - Stats (per connection) - this.connectionsReceivedTotal = this.createGauge('connections_received_total', 'Total connections received'); - this.commandsProcessedTotal = this.createGauge('commands_processed_total', 'Total commands processed'); - this.instantaneousOpsPerSec = this.createGauge('instantaneous_ops_per_sec', 'Current operations per second'); - this.instantaneousInputKbps = this.createGauge('instantaneous_input_kbps', 'Current input kilobytes per second'); - this.instantaneousOutputKbps = 
this.createGauge('instantaneous_output_kbps', 'Current output kilobytes per second'); + this.connectionsReceivedTotal = this.createGauge( + 'connections_received_total', + 'Total connections received', + ); + this.commandsProcessedTotal = this.createGauge( + 'commands_processed_total', + 'Total commands processed', + ); + this.instantaneousOpsPerSec = this.createGauge( + 'instantaneous_ops_per_sec', + 'Current operations per second', + ); + this.instantaneousInputKbps = this.createGauge( + 'instantaneous_input_kbps', + 'Current input kilobytes per second', + ); + this.instantaneousOutputKbps = this.createGauge( + 'instantaneous_output_kbps', + 'Current output kilobytes per second', + ); this.keyspaceHitsTotal = this.createGauge('keyspace_hits_total', 'Total keyspace hits'); this.keyspaceMissesTotal = this.createGauge('keyspace_misses_total', 'Total keyspace misses'); this.evictedKeysTotal = this.createGauge('evicted_keys_total', 'Total evicted keys'); @@ -309,32 +409,67 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule // Standard INFO - Replication (per connection) this.connectedSlaves = this.createGauge('connected_slaves', 'Number of connected replicas'); this.replicationOffset = this.createGauge('replication_offset', 'Replication offset'); - this.masterLinkUp = this.createGauge('master_link_up', '1 if link to master is up (replica only)'); - this.masterLastIoSecondsAgo = this.createGauge('master_last_io_seconds_ago', 'Seconds since last I/O with master (replica only)'); + this.masterLinkUp = this.createGauge( + 'master_link_up', + '1 if link to master is up (replica only)', + ); + this.masterLastIoSecondsAgo = this.createGauge( + 'master_last_io_seconds_ago', + 'Seconds since last I/O with master (replica only)', + ); // Keyspace Metrics (per connection, per database) this.dbKeys = this.createGauge('db_keys', 'Total keys in database', ['db']); - this.dbKeysExpiring = this.createGauge('db_keys_expiring', 'Keys with expiration in 
database', ['db']); + this.dbKeysExpiring = this.createGauge('db_keys_expiring', 'Keys with expiration in database', [ + 'db', + ]); this.dbAvgTtlSeconds = this.createGauge('db_avg_ttl_seconds', 'Average TTL in seconds', ['db']); // Cluster Metrics (per connection) this.clusterEnabled = this.createGauge('cluster_enabled', '1 if cluster mode is enabled'); - this.clusterKnownNodes = this.createGauge('cluster_known_nodes', 'Number of known cluster nodes'); + this.clusterKnownNodes = this.createGauge( + 'cluster_known_nodes', + 'Number of known cluster nodes', + ); this.clusterSize = this.createGauge('cluster_size', 'Number of master nodes in cluster'); - this.clusterSlotsAssigned = this.createGauge('cluster_slots_assigned', 'Number of assigned slots'); + this.clusterSlotsAssigned = this.createGauge( + 'cluster_slots_assigned', + 'Number of assigned slots', + ); this.clusterSlotsOk = this.createGauge('cluster_slots_ok', 'Number of slots in OK state'); this.clusterSlotsFail = this.createGauge('cluster_slots_fail', 'Number of slots in FAIL state'); - this.clusterSlotsPfail = this.createGauge('cluster_slots_pfail', 'Number of slots in PFAIL state'); + this.clusterSlotsPfail = this.createGauge( + 'cluster_slots_pfail', + 'Number of slots in PFAIL state', + ); // Cluster Slot Metrics (Valkey 8.0+) - per connection, per slot this.clusterSlotKeys = this.createGauge('cluster_slot_keys', 'Keys in cluster slot', ['slot']); - this.clusterSlotExpires = this.createGauge('cluster_slot_expires', 'Expiring keys in cluster slot', ['slot']); - this.clusterSlotReadsTotal = this.createGauge('cluster_slot_reads_total', 'Total reads for cluster slot', ['slot']); - this.clusterSlotWritesTotal = this.createGauge('cluster_slot_writes_total', 'Total writes for cluster slot', ['slot']); + this.clusterSlotExpires = this.createGauge( + 'cluster_slot_expires', + 'Expiring keys in cluster slot', + ['slot'], + ); + this.clusterSlotReadsTotal = this.createGauge( + 'cluster_slot_reads_total', + 'Total 
reads for cluster slot', + ['slot'], + ); + this.clusterSlotWritesTotal = this.createGauge( + 'cluster_slot_writes_total', + 'Total writes for cluster slot', + ['slot'], + ); // CPU Metrics (per connection) - this.cpuSysSecondsTotal = this.createGauge('cpu_sys_seconds_total', 'System CPU consumed by the server'); - this.cpuUserSecondsTotal = this.createGauge('cpu_user_seconds_total', 'User CPU consumed by the server'); + this.cpuSysSecondsTotal = this.createGauge( + 'cpu_sys_seconds_total', + 'System CPU consumed by the server', + ); + this.cpuUserSecondsTotal = this.createGauge( + 'cpu_user_seconds_total', + 'User CPU consumed by the server', + ); // Slowlog Raw Metrics (per connection) this.slowlogLength = this.createGauge('slowlog_length', 'Current slowlog length'); @@ -370,14 +505,50 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule labelNames: ['connection', 'pattern', 'severity'], registers: [this.registry], }); - this.anomalyEventsCurrent = this.createGauge('anomaly_events_current', 'Unresolved anomalies', ['severity']); - this.anomalyBySeverity = this.createGauge('anomaly_by_severity', 'Anomalies in last hour by severity', ['severity']); - this.anomalyByMetric = this.createGauge('anomaly_by_metric', 'Anomalies in last hour by metric', ['metric_type']); - this.correlatedGroupsBySeverity = this.createGauge('correlated_groups_by_severity', 'Groups in last hour by severity', ['severity']); - this.correlatedGroupsByPattern = this.createGauge('correlated_groups_by_pattern', 'Groups in last hour by pattern', ['pattern']); - this.anomalyDetectionBufferReady = this.createGauge('anomaly_buffer_ready', 'Buffer ready state (1=ready, 0=warming)', ['metric_type']); - this.anomalyDetectionBufferMean = this.createGauge('anomaly_buffer_mean', 'Rolling mean for anomaly detection', ['metric_type']); - this.anomalyDetectionBufferStdDev = this.createGauge('anomaly_buffer_stddev', 'Rolling stddev for anomaly detection', ['metric_type']); + 
this.anomalyEventsCurrent = this.createGauge('anomaly_events_current', 'Unresolved anomalies', [ + 'severity', + ]); + this.anomalyBySeverity = this.createGauge( + 'anomaly_by_severity', + 'Anomalies in last hour by severity', + ['severity'], + ); + this.anomalyByMetric = this.createGauge( + 'anomaly_by_metric', + 'Anomalies in last hour by metric', + ['metric_type'], + ); + this.correlatedGroupsBySeverity = this.createGauge( + 'correlated_groups_by_severity', + 'Groups in last hour by severity', + ['severity'], + ); + this.correlatedGroupsByPattern = this.createGauge( + 'correlated_groups_by_pattern', + 'Groups in last hour by pattern', + ['pattern'], + ); + this.anomalyDetectionBufferReady = this.createGauge( + 'anomaly_buffer_ready', + 'Buffer ready state (1=ready, 0=warming)', + ['metric_type'], + ); + this.anomalyDetectionBufferMean = this.createGauge( + 'anomaly_buffer_mean', + 'Rolling mean for anomaly detection', + ['metric_type'], + ); + this.anomalyDetectionBufferStdDev = this.createGauge( + 'anomaly_buffer_stddev', + 'Rolling stddev for anomaly detection', + ['metric_type'], + ); + + // Throughput Forecasting + this.throughputTimeToLimitSeconds = this.createGauge( + 'throughput_time_to_limit_seconds', + 'Projected seconds until ops/sec reaches configured ceiling. 
Only exported when a ceiling is configured.', + ); } /** @@ -385,7 +556,7 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule */ async updateMetrics(): Promise { const connections = this.connectionRegistry.list(); - const connectedConnections = connections.filter(c => c.isConnected); + const connectedConnections = connections.filter((c) => c.isConnected); // Update both INFO-based and storage-based metrics for all connections for (const conn of connectedConnections) { @@ -394,7 +565,7 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule await this.updateStorageBasedMetricsForConnection(conn.id); } catch (error) { this.logger.warn( - `Failed to update metrics for connection ${conn.name}: ${error instanceof Error ? error.message : 'Unknown'}` + `Failed to update metrics for connection ${conn.name}: ${error instanceof Error ? error.message : 'Unknown'}`, ); } } @@ -411,6 +582,23 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule await this.updateClientMetrics(connectionId, connLabel, state); await this.updateSlowlogMetrics(connectionId, connLabel, state); await this.updateCommandlogMetrics(connectionId, connLabel, state); + await this.updateThroughputMetrics(connectionId, connLabel); + } + + private async updateThroughputMetrics(connectionId: string, connLabel: string): Promise { + if (!this.throughputForecastingService) { + return; + } + try { + const forecast = await this.throughputForecastingService.getForecast(connectionId); + if (forecast.opsCeiling !== null && !forecast.insufficientData && forecast.enabled) { + const value = forecast.timeToLimitMs !== null ? 
forecast.timeToLimitMs / 1000 : -1; + this.throughputTimeToLimitSeconds.labels(connLabel).set(value); + } + } catch { + this.logger.warn(`Failed to update throughput metrics for connection ${connectionId}`); + // Silently skip if forecasting unavailable + } } /** @@ -444,7 +632,7 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule } } - private updateServerMetrics(info: Record, connLabel: string): void { + private updateServerMetrics(info: InfoResponse, connLabel: string): void { if (!info.server) return; const version = info.server.valkey_version || info.server.redis_version || 'unknown'; @@ -456,10 +644,10 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule } private updateClientInfoMetrics( - info: Record, + info: InfoResponse, connLabel: string, connectionId: string, - config: { host: string; port: number } | null, + _config: { host: string; port: number } | null, ): void { if (!info.clients) return; @@ -475,27 +663,29 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule // Webhook dispatch for connection.critical if (this.webhookDispatcher && maxClients > 0) { const usedPercent = (connectedClients / maxClients) * 100; - this.webhookDispatcher.dispatchThresholdAlertPerWebhook( - WebhookEventType.CONNECTION_CRITICAL, - 'connection_critical', - usedPercent, - 'connectionCriticalPercent', - true, - { - currentConnections: connectedClients, - maxConnections: maxClients, - usedPercent: parseFloat(usedPercent.toFixed(2)), - message: `Connection usage critical: ${usedPercent.toFixed(1)}% (${connectedClients} / ${maxClients})`, - }, - connectionId - ).catch(err => { - this.logger.error('Failed to dispatch connection.critical webhook', err); - }); + this.webhookDispatcher + .dispatchThresholdAlertPerWebhook( + WebhookEventType.CONNECTION_CRITICAL, + 'connection_critical', + usedPercent, + 'connectionCriticalPercent', + true, + { + currentConnections: connectedClients, + 
maxConnections: maxClients, + usedPercent: parseFloat(usedPercent.toFixed(2)), + message: `Connection usage critical: ${usedPercent.toFixed(1)}% (${connectedClients} / ${maxClients})`, + }, + connectionId, + ) + .catch((err) => { + this.logger.error('Failed to dispatch connection.critical webhook', err); + }); } } private updateMemoryMetrics( - info: Record, + info: InfoResponse, connLabel: string, connectionId: string, config: { host: string; port: number } | null, @@ -510,57 +700,79 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule this.memoryUsedRssBytes.labels(connLabel).set(parseInt(info.memory.used_memory_rss) || 0); this.memoryUsedPeakBytes.labels(connLabel).set(parseInt(info.memory.used_memory_peak) || 0); this.memoryMaxBytes.labels(connLabel).set(maxMemory); - this.memoryFragmentationRatio.labels(connLabel).set(parseFloat(info.memory.mem_fragmentation_ratio) || 0); - this.memoryFragmentationBytes.labels(connLabel).set(parseInt(info.memory.mem_fragmentation_bytes) || 0); + this.memoryFragmentationRatio + .labels(connLabel) + .set(parseFloat(info.memory.mem_fragmentation_ratio) || 0); + this.memoryFragmentationBytes + .labels(connLabel) + .set(parseInt(info.memory.mem_fragmentation_bytes) || 0); if (this.webhookDispatcher && maxMemory > 0) { const usedPercent = (memoryUsed / maxMemory) * 100; - this.webhookDispatcher.dispatchThresholdAlertPerWebhook( - WebhookEventType.MEMORY_CRITICAL, - 'memory_critical', - usedPercent, - 'memoryCriticalPercent', - true, - { - usedBytes: memoryUsed, - maxBytes: maxMemory, - usedPercent: parseFloat(usedPercent.toFixed(2)), - usedMemoryHuman: this.formatBytes(memoryUsed), - maxMemoryHuman: this.formatBytes(maxMemory), - message: `Memory usage critical: ${usedPercent.toFixed(1)}% (${this.formatBytes(memoryUsed)} / ${this.formatBytes(maxMemory)})`, - }, - connectionId - ).catch(err => { - this.logger.error('Failed to dispatch memory.critical webhook', err); - }); - - // Compliance alert for 
enterprise tier - if (usedPercent > 80 && maxmemoryPolicy === 'noeviction' && this.webhookEventsEnterpriseService) { - this.webhookEventsEnterpriseService.dispatchComplianceAlert({ - complianceType: 'data_retention', - severity: 'high', - memoryUsedPercent: usedPercent, - maxmemoryPolicy, - message: `Compliance alert: Memory at ${usedPercent.toFixed(1)}% with 'noeviction' policy may cause data loss and violate retention policies`, - timestamp: Date.now(), - instance: { host: config?.host || 'localhost', port: config?.port || 6379 }, + this.webhookDispatcher + .dispatchThresholdAlertPerWebhook( + WebhookEventType.MEMORY_CRITICAL, + 'memory_critical', + usedPercent, + 'memoryCriticalPercent', + true, + { + usedBytes: memoryUsed, + maxBytes: maxMemory, + usedPercent: parseFloat(usedPercent.toFixed(2)), + usedMemoryHuman: this.formatBytes(memoryUsed), + maxMemoryHuman: this.formatBytes(maxMemory), + message: `Memory usage critical: ${usedPercent.toFixed(1)}% (${this.formatBytes(memoryUsed)} / ${this.formatBytes(maxMemory)})`, + }, connectionId, - }).catch(err => { - this.logger.error('Failed to dispatch compliance.alert webhook', err); + ) + .catch((err) => { + this.logger.error('Failed to dispatch memory.critical webhook', err); }); + + // Compliance alert for enterprise tier + if ( + usedPercent > 80 && + maxmemoryPolicy === 'noeviction' && + this.webhookEventsEnterpriseService + ) { + this.webhookEventsEnterpriseService + .dispatchComplianceAlert({ + complianceType: 'data_retention', + severity: 'high', + memoryUsedPercent: usedPercent, + maxmemoryPolicy, + message: `Compliance alert: Memory at ${usedPercent.toFixed(1)}% with 'noeviction' policy may cause data loss and violate retention policies`, + timestamp: Date.now(), + instance: { host: config?.host || 'localhost', port: config?.port || 6379 }, + connectionId, + }) + .catch((err) => { + this.logger.error('Failed to dispatch compliance.alert webhook', err); + }); } } } - private updateStatsMetrics(info: Record, 
connLabel: string): void { + private updateStatsMetrics(info: InfoResponse, connLabel: string): void { if (!info.stats) return; - this.connectionsReceivedTotal.labels(connLabel).set(parseInt(info.stats.total_connections_received) || 0); - this.commandsProcessedTotal.labels(connLabel).set(parseInt(info.stats.total_commands_processed) || 0); - this.instantaneousOpsPerSec.labels(connLabel).set(parseInt(info.stats.instantaneous_ops_per_sec) || 0); - this.instantaneousInputKbps.labels(connLabel).set(parseFloat(info.stats.instantaneous_input_kbps) || 0); - this.instantaneousOutputKbps.labels(connLabel).set(parseFloat(info.stats.instantaneous_output_kbps) || 0); + this.connectionsReceivedTotal + .labels(connLabel) + .set(parseInt(info.stats.total_connections_received) || 0); + this.commandsProcessedTotal + .labels(connLabel) + .set(parseInt(info.stats.total_commands_processed) || 0); + this.instantaneousOpsPerSec + .labels(connLabel) + .set(parseInt(info.stats.instantaneous_ops_per_sec) || 0); + this.instantaneousInputKbps + .labels(connLabel) + .set(parseFloat(info.stats.instantaneous_input_kbps) || 0); + this.instantaneousOutputKbps + .labels(connLabel) + .set(parseFloat(info.stats.instantaneous_output_kbps) || 0); this.keyspaceHitsTotal.labels(connLabel).set(parseInt(info.stats.keyspace_hits) || 0); this.keyspaceMissesTotal.labels(connLabel).set(parseInt(info.stats.keyspace_misses) || 0); this.evictedKeysTotal.labels(connLabel).set(parseInt(info.stats.evicted_keys) || 0); @@ -569,7 +781,7 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule this.pubsubPatterns.labels(connLabel).set(parseInt(info.stats.pubsub_patterns) || 0); } - private updateCpuMetrics(info: Record, connLabel: string): void { + private updateCpuMetrics(info: InfoResponse, connLabel: string): void { if (!info.cpu) return; this.cpuSysSecondsTotal.labels(connLabel).set(parseFloat(info.cpu.used_cpu_sys) || 0); @@ -577,7 +789,7 @@ export class PrometheusService extends 
MultiConnectionPoller implements OnModule } private updateReplicationMetrics( - info: Record, + info: InfoResponse, connLabel: string, connectionId: string, config: { host: string; port: number } | null, @@ -587,41 +799,49 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule const role = info.replication.role; if (role === 'master') { - this.connectedSlaves.labels(connLabel).set(parseInt(info.replication.connected_slaves || '0') || 0); + this.connectedSlaves + .labels(connLabel) + .set(parseInt(info.replication.connected_slaves || '0') || 0); if (info.replication.master_repl_offset) { - this.replicationOffset.labels(connLabel).set(parseInt(info.replication.master_repl_offset) || 0); + this.replicationOffset + .labels(connLabel) + .set(parseInt(info.replication.master_repl_offset) || 0); } } else if (role === 'slave') { const masterLinkStatus = info.replication.master_link_status; this.masterLinkUp.labels(connLabel).set(masterLinkStatus === 'up' ? 1 : 0); - const lastIoSecondsAgo = parseInt(info.replication.master_last_io_seconds_ago) || 0; + const lastIoSecondsAgo = parseInt(info.replication.master_last_io_seconds_ago ?? 
'') || 0; if (info.replication.master_last_io_seconds_ago) { this.masterLastIoSecondsAgo.labels(connLabel).set(lastIoSecondsAgo); } if (info.replication.slave_repl_offset) { - this.replicationOffset.labels(connLabel).set(parseInt(info.replication.slave_repl_offset) || 0); + this.replicationOffset + .labels(connLabel) + .set(parseInt(info.replication.slave_repl_offset) || 0); } // Webhook dispatch for replication.lag if (this.webhookEventsProService && masterLinkStatus === 'up') { - this.webhookEventsProService.dispatchReplicationLag({ - lagSeconds: lastIoSecondsAgo, - threshold: 10, - masterLinkStatus, - timestamp: Date.now(), - instance: { host: config?.host || 'localhost', port: config?.port || 6379 }, - connectionId, - }).catch(err => { - this.logger.error('Failed to dispatch replication.lag webhook', err); - }); + this.webhookEventsProService + .dispatchReplicationLag({ + lagSeconds: lastIoSecondsAgo, + threshold: 10, + masterLinkStatus, + timestamp: Date.now(), + instance: { host: config?.host || 'localhost', port: config?.port || 6379 }, + connectionId, + }) + .catch((err) => { + this.logger.error('Failed to dispatch replication.lag webhook', err); + }); } } } private updateKeyspaceMetricsFromInfo( - info: Record, + info: InfoResponse, connLabel: string, state: ConnectionMetricState, ): void { @@ -635,7 +855,9 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule if (typeof dbInfo === 'string') { const parts = dbInfo.split(','); - let keys = 0, expires = 0, avgTtl = 0; + let keys = 0, + expires = 0, + avgTtl = 0; for (const part of parts) { const [key, value] = part.split('='); @@ -669,7 +891,7 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule private async updateClusterMetricsFromInfo( client: DatabasePort, - info: Record, + info: InfoResponse, connLabel: string, connectionId: string, state: ConnectionMetricState, @@ -691,13 +913,17 @@ export class PrometheusService extends MultiConnectionPoller 
implements OnModule const slotsFail = parseInt(clusterInfo.cluster_slots_fail) || 0; if (clusterInfo.cluster_known_nodes) { - this.clusterKnownNodes.labels(connLabel).set(parseInt(clusterInfo.cluster_known_nodes) || 0); + this.clusterKnownNodes + .labels(connLabel) + .set(parseInt(clusterInfo.cluster_known_nodes) || 0); } if (clusterInfo.cluster_size) { this.clusterSize.labels(connLabel).set(parseInt(clusterInfo.cluster_size) || 0); } if (clusterInfo.cluster_slots_assigned) { - this.clusterSlotsAssigned.labels(connLabel).set(parseInt(clusterInfo.cluster_slots_assigned) || 0); + this.clusterSlotsAssigned + .labels(connLabel) + .set(parseInt(clusterInfo.cluster_slots_assigned) || 0); } if (clusterInfo.cluster_slots_ok) { this.clusterSlotsOk.labels(connLabel).set(parseInt(clusterInfo.cluster_slots_ok) || 0); @@ -706,7 +932,9 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule this.clusterSlotsFail.labels(connLabel).set(slotsFail); } if (clusterInfo.cluster_slots_pfail) { - this.clusterSlotsPfail.labels(connLabel).set(parseInt(clusterInfo.cluster_slots_pfail) || 0); + this.clusterSlotsPfail + .labels(connLabel) + .set(parseInt(clusterInfo.cluster_slots_pfail) || 0); } // Webhook dispatch for cluster.failover @@ -736,7 +964,10 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule } const capabilities = client.getCapabilities(); - if (capabilities.hasClusterSlotStats && this.runtimeCapabilityTracker.isAvailable(connectionId, 'canClusterSlotStats')) { + if ( + capabilities.hasClusterSlotStats && + this.runtimeCapabilityTracker.isAvailable(connectionId, 'canClusterSlotStats') + ) { try { const newSlotLabels = new Set(); const slotStats = await client.getClusterSlotStats('key-count', 100); @@ -760,12 +991,20 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule state.currentClusterSlotLabels = newSlotLabels; } catch (slotStatsError) { - 
this.runtimeCapabilityTracker.recordFailure(connectionId, 'canClusterSlotStats', slotStatsError instanceof Error ? slotStatsError : String(slotStatsError)); + this.runtimeCapabilityTracker.recordFailure( + connectionId, + 'canClusterSlotStats', + slotStatsError instanceof Error ? slotStatsError : String(slotStatsError), + ); this.logger.error(`Failed to update cluster slot stats for ${connLabel}`, slotStatsError); } } } catch (error) { - this.runtimeCapabilityTracker.recordFailure(connectionId, 'canClusterInfo', error instanceof Error ? error : String(error)); + this.runtimeCapabilityTracker.recordFailure( + connectionId, + 'canClusterInfo', + error instanceof Error ? error : String(error), + ); this.logger.error(`Failed to update cluster metrics for ${connLabel}`, error); } } @@ -903,7 +1142,10 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule const newRequestPatternLabels = new Set(); const newReplyPatternLabels = new Set(); - const requestAnalysis = this.commandLogAnalytics.getCachedAnalysis('large-request', connectionId); + const requestAnalysis = this.commandLogAnalytics.getCachedAnalysis( + 'large-request', + connectionId, + ); let requestTotal = 0; if (requestAnalysis) { for (const p of requestAnalysis.patterns) { @@ -964,18 +1206,24 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule // Webhook dispatch for slowlog.threshold if (this.webhookEventsProService) { - this.webhookEventsProService.dispatchSlowlogThreshold({ - slowlogCount: length, - threshold: 100, - timestamp: Date.now(), - instance: { host: config?.host || 'localhost', port: config?.port || 6379 }, - connectionId, - }).catch(err => { - this.logger.error('Failed to dispatch slowlog.threshold webhook', err); - }); + this.webhookEventsProService + .dispatchSlowlogThreshold({ + slowlogCount: length, + threshold: 100, + timestamp: Date.now(), + instance: { host: config?.host || 'localhost', port: config?.port || 6379 }, + connectionId, 
+ }) + .catch((err) => { + this.logger.error('Failed to dispatch slowlog.threshold webhook', err); + }); } } catch (error) { - this.runtimeCapabilityTracker.recordFailure(connectionId, 'canSlowLog', error instanceof Error ? error : String(error)); + this.runtimeCapabilityTracker.recordFailure( + connectionId, + 'canSlowLog', + error instanceof Error ? error : String(error), + ); this.logger.error(`Failed to update slowlog raw metrics for ${connLabel}`, error); } } @@ -985,7 +1233,7 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule const metrics = await this.registry.metrics(); return metrics .split('\n') - .filter(line => !line.match(/\s+[Nn]a[Nn]\s*$/)) + .filter((line) => !line.match(/\s+[Nn]a[Nn]\s*$/)) .join('\n'); } @@ -1003,9 +1251,19 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule return this.pollDuration.startTimer({ connection: connLabel, service }); } - incrementAnomalyEvent(severity: string, metricType: string, anomalyType: string, connectionId?: string): void { + incrementAnomalyEvent( + severity: string, + metricType: string, + anomalyType: string, + connectionId?: string, + ): void { const connLabel = connectionId ? 
this.getConnectionLabel(connectionId) : 'unknown'; - this.anomalyEventsTotal.inc({ connection: connLabel, severity, metric_type: metricType, anomaly_type: anomalyType }); + this.anomalyEventsTotal.inc({ + connection: connLabel, + severity, + metric_type: metricType, + anomaly_type: anomalyType, + }); } incrementCorrelatedGroup(pattern: string, severity: string, connectionId?: string): void { @@ -1022,7 +1280,8 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule }, connectionId?: string, ): void { - const effectiveConnectionId = connectionId || this.connectionRegistry.getDefaultId() || 'unknown'; + const effectiveConnectionId = + connectionId || this.connectionRegistry.getDefaultId() || 'unknown'; const connLabel = this.getConnectionLabel(effectiveConnectionId); const state = this.getConnectionState(effectiveConnectionId); @@ -1063,7 +1322,8 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule buffers: Array<{ metricType: string; mean: number; stdDev: number; ready: boolean }>, connectionId?: string, ): void { - const effectiveConnectionId = connectionId || this.connectionRegistry.getDefaultId() || 'unknown'; + const effectiveConnectionId = + connectionId || this.connectionRegistry.getDefaultId() || 'unknown'; const connLabel = this.getConnectionLabel(effectiveConnectionId); for (const buf of buffers) { this.anomalyDetectionBufferReady.labels(connLabel, buf.metricType).set(buf.ready ? 
1 : 0); diff --git a/apps/api/src/settings/settings.service.ts b/apps/api/src/settings/settings.service.ts index ccb42c8f..dcdb2b97 100644 --- a/apps/api/src/settings/settings.service.ts +++ b/apps/api/src/settings/settings.service.ts @@ -50,10 +50,29 @@ export class SettingsService implements OnModuleInit, OnModuleDestroy { return { id: 1, auditPollIntervalMs: parseInt(this.configService.get('AUDIT_POLL_INTERVAL_MS', '60000'), 10), - clientAnalyticsPollIntervalMs: parseInt(this.configService.get('CLIENT_ANALYTICS_POLL_INTERVAL_MS', '60000'), 10), - anomalyPollIntervalMs: parseInt(this.configService.get('ANOMALY_POLL_INTERVAL_MS', '1000'), 10), + clientAnalyticsPollIntervalMs: parseInt( + this.configService.get('CLIENT_ANALYTICS_POLL_INTERVAL_MS', '60000'), + 10, + ), + anomalyPollIntervalMs: parseInt( + this.configService.get('ANOMALY_POLL_INTERVAL_MS', '1000'), + 10, + ), anomalyCacheTtlMs: parseInt(this.configService.get('ANOMALY_CACHE_TTL_MS', '3600000'), 10), - anomalyPrometheusIntervalMs: parseInt(this.configService.get('ANOMALY_PROMETHEUS_INTERVAL_MS', '30000'), 10), + anomalyPrometheusIntervalMs: parseInt( + this.configService.get('ANOMALY_PROMETHEUS_INTERVAL_MS', '30000'), + 10, + ), + throughputForecastingEnabled: + this.configService.get('THROUGHPUT_FORECASTING_ENABLED', 'true') === 'true', + throughputForecastingDefaultRollingWindowMs: parseInt( + this.configService.get('THROUGHPUT_FORECASTING_DEFAULT_ROLLING_WINDOW_MS', '21600000'), + 10, + ), + throughputForecastingDefaultAlertThresholdMs: parseInt( + this.configService.get('THROUGHPUT_FORECASTING_DEFAULT_ALERT_THRESHOLD_MS', '7200000'), + 10, + ), createdAt: now, updatedAt: now, }; diff --git a/apps/api/src/storage/adapters/base-sql.adapter.ts b/apps/api/src/storage/adapters/base-sql.adapter.ts index 53bc3cc1..6124229d 100644 --- a/apps/api/src/storage/adapters/base-sql.adapter.ts +++ b/apps/api/src/storage/adapters/base-sql.adapter.ts @@ -266,6 +266,9 @@ export class RowMappers { 
anomalyPollIntervalMs: row.anomaly_poll_interval_ms, anomalyCacheTtlMs: row.anomaly_cache_ttl_ms, anomalyPrometheusIntervalMs: row.anomaly_prometheus_interval_ms, + throughputForecastingEnabled: !!row.throughput_forecasting_enabled, + throughputForecastingDefaultRollingWindowMs: row.throughput_forecasting_default_rolling_window_ms, + throughputForecastingDefaultAlertThresholdMs: row.throughput_forecasting_default_alert_threshold_ms, updatedAt: typeof row.updated_at === 'string' ? parseInt(row.updated_at, 10) : row.updated_at, createdAt: typeof row.created_at === 'string' ? parseInt(row.created_at, 10) : row.created_at, }; diff --git a/apps/api/src/storage/adapters/memory.adapter.ts b/apps/api/src/storage/adapters/memory.adapter.ts index be5528ce..d6bed995 100644 --- a/apps/api/src/storage/adapters/memory.adapter.ts +++ b/apps/api/src/storage/adapters/memory.adapter.ts @@ -20,7 +20,6 @@ import { Webhook, WebhookDelivery, WebhookEventType, - DeliveryStatus, StoredSlowLogEntry, SlowLogQueryOptions, StoredCommandLogEntry, @@ -30,8 +29,12 @@ import { LatencySnapshotQueryOptions, StoredMemorySnapshot, MemorySnapshotQueryOptions, + StoredLatencyHistogram, + HotKeyEntry, + HotKeyQueryOptions, + DatabaseConnectionConfig, } from '../../common/interfaces/storage-port.interface'; -import type { VectorIndexSnapshot, VectorIndexSnapshotQueryOptions } from '@betterdb/shared'; +import type { VectorIndexSnapshot, VectorIndexSnapshotQueryOptions, ThroughputSettings } from '@betterdb/shared'; export class MemoryAdapter implements StoragePort { private aclEntries: StoredAclEntry[] = []; @@ -41,9 +44,10 @@ export class MemoryAdapter implements StoragePort { private slowLogEntries: StoredSlowLogEntry[] = []; private commandLogEntries: StoredCommandLogEntry[] = []; private latencySnapshots: StoredLatencySnapshot[] = []; - private latencyHistograms: import('../../common/interfaces/storage-port.interface').StoredLatencyHistogram[] = []; + private latencyHistograms: StoredLatencyHistogram[] 
= []; private memorySnapshots: StoredMemorySnapshot[] = []; private vectorIndexSnapshots: VectorIndexSnapshot[] = []; + private throughputSettings: Map = new Map(); private settings: AppSettings | null = null; private webhooks: Map = new Map(); private deliveries: Map = new Map(); @@ -133,7 +137,11 @@ export class MemoryAdapter implements StoragePort { return filtered.slice(offset, offset + limit); } - async getAuditStats(startTime?: number, endTime?: number, connectionId?: string): Promise { + async getAuditStats( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise { let filtered = [...this.aclEntries]; if (connectionId) { @@ -179,7 +187,9 @@ export class MemoryAdapter implements StoragePort { async pruneOldEntries(olderThanTimestamp: number, connectionId?: string): Promise { const before = this.aclEntries.length; if (connectionId) { - this.aclEntries = this.aclEntries.filter((e) => e.capturedAt >= olderThanTimestamp || e.connectionId !== connectionId); + this.aclEntries = this.aclEntries.filter( + (e) => e.capturedAt >= olderThanTimestamp || e.connectionId !== connectionId, + ); } else { this.aclEntries = this.aclEntries.filter((e) => e.capturedAt >= olderThanTimestamp); } @@ -193,7 +203,9 @@ export class MemoryAdapter implements StoragePort { return clients.length; } - async getClientSnapshots(options: ClientSnapshotQueryOptions = {}): Promise { + async getClientSnapshots( + options: ClientSnapshotQueryOptions = {}, + ): Promise { let filtered = [...this.clientSnapshots]; if (options.connectionId) { @@ -284,7 +296,11 @@ export class MemoryAdapter implements StoragePort { return Array.from(pointsMap.values()).sort((a, b) => a.timestamp - b.timestamp); } - async getClientAnalyticsStats(startTime?: number, endTime?: number, connectionId?: string): Promise { + async getClientAnalyticsStats( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise { let filtered = [...this.clientSnapshots]; if (connectionId) { @@ 
-299,7 +315,8 @@ export class MemoryAdapter implements StoragePort { filtered = filtered.filter((c) => c.capturedAt <= endTime); } - const latestTimestamp = filtered.length > 0 ? Math.max(...filtered.map((c) => c.capturedAt)) : 0; + const latestTimestamp = + filtered.length > 0 ? Math.max(...filtered.map((c) => c.capturedAt)) : 0; const currentClients = filtered.filter((c) => c.capturedAt === latestTimestamp); // Group by captured_at to find peak @@ -337,7 +354,10 @@ export class MemoryAdapter implements StoragePort { const currentCount = currentClients.filter((c) => c.name === name).length; const byTimestampForName = new Map(); for (const client of clients) { - byTimestampForName.set(client.capturedAt, (byTimestampForName.get(client.capturedAt) || 0) + 1); + byTimestampForName.set( + client.capturedAt, + (byTimestampForName.get(client.capturedAt) || 0) + 1, + ); } const peakForName = Math.max(...Array.from(byTimestampForName.values())); const avgAge = clients.reduce((sum, c) => sum + c.age, 0) / clients.length; @@ -365,7 +385,10 @@ export class MemoryAdapter implements StoragePort { const currentCount = currentClients.filter((c) => c.user === user).length; const byTimestampForUser = new Map(); for (const client of clients) { - byTimestampForUser.set(client.capturedAt, (byTimestampForUser.get(client.capturedAt) || 0) + 1); + byTimestampForUser.set( + client.capturedAt, + (byTimestampForUser.get(client.capturedAt) || 0) + 1, + ); } const peakForUser = Math.max(...Array.from(byTimestampForUser.values())); @@ -376,7 +399,10 @@ export class MemoryAdapter implements StoragePort { } // Connections by user and name - const connectionsByUserAndName: Record = {}; + const connectionsByUserAndName: Record< + string, + { user: string; name: string; current: number; peak: number; avgAge: number } + > = {}; const byUserAndName = new Map(); for (const client of filtered) { if (client.user && client.name) { @@ -393,7 +419,10 @@ export class MemoryAdapter implements StoragePort { 
const currentCount = currentClients.filter((c) => c.user === user && c.name === name).length; const byTimestampForCombined = new Map(); for (const client of clients) { - byTimestampForCombined.set(client.capturedAt, (byTimestampForCombined.get(client.capturedAt) || 0) + 1); + byTimestampForCombined.set( + client.capturedAt, + (byTimestampForCombined.get(client.capturedAt) || 0) + 1, + ); } const peakForCombined = Math.max(...Array.from(byTimestampForCombined.values())); const avgAge = clients.reduce((sum, c) => sum + c.age, 0) / clients.length; @@ -466,10 +495,15 @@ export class MemoryAdapter implements StoragePort { return filtered.sort((a, b) => a.capturedAt - b.capturedAt); } - async pruneOldClientSnapshots(olderThanTimestamp: number, connectionId?: string): Promise { + async pruneOldClientSnapshots( + olderThanTimestamp: number, + connectionId?: string, + ): Promise { const before = this.clientSnapshots.length; if (connectionId) { - this.clientSnapshots = this.clientSnapshots.filter((c) => c.capturedAt >= olderThanTimestamp || c.connectionId !== connectionId); + this.clientSnapshots = this.clientSnapshots.filter( + (c) => c.capturedAt >= olderThanTimestamp || c.connectionId !== connectionId, + ); } else { this.clientSnapshots = this.clientSnapshots.filter((c) => c.capturedAt >= olderThanTimestamp); } @@ -491,23 +525,29 @@ export class MemoryAdapter implements StoragePort { async getAnomalyEvents(options: AnomalyQueryOptions = {}): Promise { let filtered = [...this.anomalyEvents]; - if (options.connectionId) filtered = filtered.filter(e => e.connectionId === options.connectionId); - if (options.startTime) filtered = filtered.filter(e => e.timestamp >= options.startTime!); - if (options.endTime) filtered = filtered.filter(e => e.timestamp <= options.endTime!); - if (options.severity) filtered = filtered.filter(e => e.severity === options.severity); - if (options.metricType) filtered = filtered.filter(e => e.metricType === options.metricType); - if 
(options.resolved !== undefined) filtered = filtered.filter(e => e.resolved === options.resolved); + if (options.connectionId) + filtered = filtered.filter((e) => e.connectionId === options.connectionId); + if (options.startTime) filtered = filtered.filter((e) => e.timestamp >= options.startTime!); + if (options.endTime) filtered = filtered.filter((e) => e.timestamp <= options.endTime!); + if (options.severity) filtered = filtered.filter((e) => e.severity === options.severity); + if (options.metricType) filtered = filtered.filter((e) => e.metricType === options.metricType); + if (options.resolved !== undefined) + filtered = filtered.filter((e) => e.resolved === options.resolved); return filtered .sort((a, b) => b.timestamp - a.timestamp) .slice(options.offset ?? 0, (options.offset ?? 0) + (options.limit ?? 100)); } - async getAnomalyStats(startTime?: number, endTime?: number, connectionId?: string): Promise { + async getAnomalyStats( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise { let filtered = [...this.anomalyEvents]; - if (connectionId) filtered = filtered.filter(e => e.connectionId === connectionId); - if (startTime) filtered = filtered.filter(e => e.timestamp >= startTime); - if (endTime) filtered = filtered.filter(e => e.timestamp <= endTime); + if (connectionId) filtered = filtered.filter((e) => e.connectionId === connectionId); + if (startTime) filtered = filtered.filter((e) => e.timestamp >= startTime); + if (endTime) filtered = filtered.filter((e) => e.timestamp <= endTime); const bySeverity: Record = {}; const byMetric: Record = {}; @@ -522,12 +562,12 @@ export class MemoryAdapter implements StoragePort { bySeverity, byMetric, byPattern: {}, - unresolvedCount: filtered.filter(e => !e.resolved).length, + unresolvedCount: filtered.filter((e) => !e.resolved).length, }; } async resolveAnomaly(id: string, resolvedAt: number): Promise { - const event = this.anomalyEvents.find(e => e.id === id); + const event = 
this.anomalyEvents.find((e) => e.id === id); if (event && !event.resolved) { event.resolved = true; event.resolvedAt = resolvedAt; @@ -540,15 +580,19 @@ export class MemoryAdapter implements StoragePort { async pruneOldAnomalyEvents(cutoffTimestamp: number, connectionId?: string): Promise { const before = this.anomalyEvents.length; if (connectionId) { - this.anomalyEvents = this.anomalyEvents.filter(e => e.timestamp >= cutoffTimestamp || e.connectionId !== connectionId); + this.anomalyEvents = this.anomalyEvents.filter( + (e) => e.timestamp >= cutoffTimestamp || e.connectionId !== connectionId, + ); } else { - this.anomalyEvents = this.anomalyEvents.filter(e => e.timestamp >= cutoffTimestamp); + this.anomalyEvents = this.anomalyEvents.filter((e) => e.timestamp >= cutoffTimestamp); } return before - this.anomalyEvents.length; } async saveCorrelatedGroup(group: StoredCorrelatedGroup, connectionId: string): Promise { - const existing = this.correlatedGroups.findIndex(g => g.correlationId === group.correlationId && g.connectionId === connectionId); + const existing = this.correlatedGroups.findIndex( + (g) => g.correlationId === group.correlationId && g.connectionId === connectionId, + ); if (existing >= 0) { this.correlatedGroups[existing] = { ...group, connectionId }; } else { @@ -560,11 +604,12 @@ export class MemoryAdapter implements StoragePort { async getCorrelatedGroups(options: AnomalyQueryOptions = {}): Promise { let filtered = [...this.correlatedGroups]; - if (options.connectionId) filtered = filtered.filter(g => g.connectionId === options.connectionId); - if (options.startTime) filtered = filtered.filter(g => g.timestamp >= options.startTime!); - if (options.endTime) filtered = filtered.filter(g => g.timestamp <= options.endTime!); - if (options.severity) filtered = filtered.filter(g => g.severity === options.severity); - if (options.pattern) filtered = filtered.filter(g => g.pattern === options.pattern); + if (options.connectionId) + filtered = 
filtered.filter((g) => g.connectionId === options.connectionId); + if (options.startTime) filtered = filtered.filter((g) => g.timestamp >= options.startTime!); + if (options.endTime) filtered = filtered.filter((g) => g.timestamp <= options.endTime!); + if (options.severity) filtered = filtered.filter((g) => g.severity === options.severity); + if (options.pattern) filtered = filtered.filter((g) => g.pattern === options.pattern); return filtered .sort((a, b) => b.timestamp - a.timestamp) @@ -574,14 +619,19 @@ export class MemoryAdapter implements StoragePort { async pruneOldCorrelatedGroups(cutoffTimestamp: number, connectionId?: string): Promise { const before = this.correlatedGroups.length; if (connectionId) { - this.correlatedGroups = this.correlatedGroups.filter(g => g.timestamp >= cutoffTimestamp || g.connectionId !== connectionId); + this.correlatedGroups = this.correlatedGroups.filter( + (g) => g.timestamp >= cutoffTimestamp || g.connectionId !== connectionId, + ); } else { - this.correlatedGroups = this.correlatedGroups.filter(g => g.timestamp >= cutoffTimestamp); + this.correlatedGroups = this.correlatedGroups.filter((g) => g.timestamp >= cutoffTimestamp); } return before - this.correlatedGroups.length; } - async saveKeyPatternSnapshots(_snapshots: KeyPatternSnapshot[], _connectionId: string): Promise { + async saveKeyPatternSnapshots( + _snapshots: KeyPatternSnapshot[], + _connectionId: string, + ): Promise { throw new Error('Key analytics not supported in memory adapter'); } @@ -589,28 +639,47 @@ export class MemoryAdapter implements StoragePort { throw new Error('Key analytics not supported in memory adapter'); } - async getKeyAnalyticsSummary(_startTime?: number, _endTime?: number, _connectionId?: string): Promise { + async getKeyAnalyticsSummary( + _startTime?: number, + _endTime?: number, + _connectionId?: string, + ): Promise { throw new Error('Key analytics not supported in memory adapter'); } - async getKeyPatternTrends(_pattern: string, _startTime: 
number, _endTime: number, _connectionId?: string): Promise> { + async getKeyPatternTrends( + _pattern: string, + _startTime: number, + _endTime: number, + _connectionId?: string, + ): Promise< + Array<{ + timestamp: number; + keyCount: number; + memoryBytes: number; + staleCount: number; + }> + > { throw new Error('Key analytics not supported in memory adapter'); } - async pruneOldKeyPatternSnapshots(_cutoffTimestamp: number, _connectionId?: string): Promise { + async pruneOldKeyPatternSnapshots( + _cutoffTimestamp: number, + _connectionId?: string, + ): Promise { throw new Error('Key analytics not supported in memory adapter'); } - async saveHotKeys(_entries: import('../../common/interfaces/storage-port.interface').HotKeyEntry[], _connectionId: string): Promise { + async saveHotKeys( + _entries: HotKeyEntry[], + _connectionId: string, + ): Promise { throw new Error('Hot key stats not supported in memory adapter'); } - async getHotKeys(_options?: import('../../common/interfaces/storage-port.interface').HotKeyQueryOptions): Promise { + async getHotKeys( + _options?: HotKeyQueryOptions, + ): Promise { throw new Error('Hot key stats not supported in memory adapter'); } @@ -655,6 +724,17 @@ export class MemoryAdapter implements StoragePort { if (updates.anomalyPrometheusIntervalMs !== undefined) { validUpdates.anomalyPrometheusIntervalMs = updates.anomalyPrometheusIntervalMs; } + if (updates.throughputForecastingEnabled !== undefined) { + validUpdates.throughputForecastingEnabled = updates.throughputForecastingEnabled; + } + if (updates.throughputForecastingDefaultRollingWindowMs !== undefined) { + validUpdates.throughputForecastingDefaultRollingWindowMs = + updates.throughputForecastingDefaultRollingWindowMs; + } + if (updates.throughputForecastingDefaultAlertThresholdMs !== undefined) { + validUpdates.throughputForecastingDefaultAlertThresholdMs = + updates.throughputForecastingDefaultAlertThresholdMs; + } this.settings = { ...this.settings, @@ -685,36 +765,38 @@ 
export class MemoryAdapter implements StoragePort { async getWebhooksByInstance(connectionId?: string): Promise { let webhooks = Array.from(this.webhooks.values()); if (connectionId) { - webhooks = webhooks.filter(w => w.connectionId === connectionId || !w.connectionId); + webhooks = webhooks.filter((w) => w.connectionId === connectionId || !w.connectionId); } else { // No connectionId provided - only return global webhooks (not scoped to any connection) - webhooks = webhooks.filter(w => !w.connectionId); + webhooks = webhooks.filter((w) => !w.connectionId); } - return webhooks - .sort((a, b) => b.createdAt - a.createdAt) - .map(w => ({ ...w })); + return webhooks.sort((a, b) => b.createdAt - a.createdAt).map((w) => ({ ...w })); } async getWebhooksByEvent(event: WebhookEventType, connectionId?: string): Promise { - let webhooks = Array.from(this.webhooks.values()) - .filter(w => w.enabled && w.events.includes(event)); + let webhooks = Array.from(this.webhooks.values()).filter( + (w) => w.enabled && w.events.includes(event), + ); if (connectionId) { // Return webhooks scoped to this connection OR global webhooks (no connectionId) - webhooks = webhooks.filter(w => w.connectionId === connectionId || !w.connectionId); + webhooks = webhooks.filter((w) => w.connectionId === connectionId || !w.connectionId); } else { // No connectionId provided - only return global webhooks (not scoped to any connection) - webhooks = webhooks.filter(w => !w.connectionId); + webhooks = webhooks.filter((w) => !w.connectionId); } - return webhooks.map(w => ({ ...w })); + return webhooks.map((w) => ({ ...w })); } - async updateWebhook(id: string, updates: Partial>): Promise { + async updateWebhook( + id: string, + updates: Partial>, + ): Promise { const webhook = this.webhooks.get(id); if (!webhook) return null; // Filter out undefined values to prevent overwriting existing fields const definedUpdates = Object.fromEntries( - Object.entries(updates).filter(([_, value]) => value !== undefined) 
+ Object.entries(updates).filter(([_, value]) => value !== undefined), ); const updated: Webhook = { @@ -734,12 +816,14 @@ export class MemoryAdapter implements StoragePort { const deliveriesToDelete = Array.from(this.deliveries.entries()) .filter(([_, d]) => d.webhookId === id) .map(([id]) => id); - deliveriesToDelete.forEach(deliveryId => this.deliveries.delete(deliveryId)); + deliveriesToDelete.forEach((deliveryId) => this.deliveries.delete(deliveryId)); } return deleted; } - async createDelivery(delivery: Omit): Promise { + async createDelivery( + delivery: Omit, + ): Promise { const now = Date.now(); const id = randomUUID(); const newDelivery: WebhookDelivery = { @@ -750,12 +834,12 @@ export class MemoryAdapter implements StoragePort { this.deliveries.set(id, newDelivery); const webhookDeliveries = Array.from(this.deliveries.values()) - .filter(d => d.webhookId === delivery.webhookId) + .filter((d) => d.webhookId === delivery.webhookId) .sort((a, b) => b.createdAt - a.createdAt); if (webhookDeliveries.length > this.MAX_DELIVERIES_PER_WEBHOOK) { const toDelete = webhookDeliveries.slice(this.MAX_DELIVERIES_PER_WEBHOOK); - toDelete.forEach(d => this.deliveries.delete(d.id)); + toDelete.forEach((d) => this.deliveries.delete(d.id)); } return { ...newDelivery }; @@ -766,15 +850,22 @@ export class MemoryAdapter implements StoragePort { return delivery ? 
{ ...delivery } : null; } - async getDeliveriesByWebhook(webhookId: string, limit: number = 50, offset: number = 0): Promise { + async getDeliveriesByWebhook( + webhookId: string, + limit: number = 50, + offset: number = 0, + ): Promise { return Array.from(this.deliveries.values()) - .filter(d => d.webhookId === webhookId) + .filter((d) => d.webhookId === webhookId) .sort((a, b) => b.createdAt - a.createdAt) .slice(offset, offset + limit) - .map(d => ({ ...d })); + .map((d) => ({ ...d })); } - async updateDelivery(id: string, updates: Partial>): Promise { + async updateDelivery( + id: string, + updates: Partial>, + ): Promise { const delivery = this.deliveries.get(id); if (!delivery) return false; @@ -789,23 +880,30 @@ export class MemoryAdapter implements StoragePort { return true; } - async getRetriableDeliveries(limit: number = 100, connectionId?: string): Promise { + async getRetriableDeliveries( + limit: number = 100, + connectionId?: string, + ): Promise { const now = Date.now(); - let deliveries = Array.from(this.deliveries.values()) - .filter(d => d.status === 'retrying' && d.nextRetryAt && d.nextRetryAt <= now); + let deliveries = Array.from(this.deliveries.values()).filter( + (d) => d.status === 'retrying' && d.nextRetryAt && d.nextRetryAt <= now, + ); if (connectionId) { - deliveries = deliveries.filter(d => d.connectionId === connectionId); + deliveries = deliveries.filter((d) => d.connectionId === connectionId); } return deliveries .sort((a, b) => (a.nextRetryAt || 0) - (b.nextRetryAt || 0)) .slice(0, limit) - .map(d => ({ ...d })); + .map((d) => ({ ...d })); } async pruneOldDeliveries(cutoffTimestamp: number, connectionId?: string): Promise { const before = this.deliveries.size; Array.from(this.deliveries.entries()) - .filter(([_, d]) => d.createdAt < cutoffTimestamp && (!connectionId || d.connectionId === connectionId)) + .filter( + ([_, d]) => + d.createdAt < cutoffTimestamp && (!connectionId || d.connectionId === connectionId), + ) .forEach(([id]) 
=> this.deliveries.delete(id)); return before - this.deliveries.size; } @@ -816,7 +914,11 @@ export class MemoryAdapter implements StoragePort { for (const entry of entries) { // Check for duplicates based on unique constraint (including connectionId) const exists = this.slowLogEntries.some( - e => e.id === entry.id && e.sourceHost === entry.sourceHost && e.sourcePort === entry.sourcePort && e.connectionId === connectionId + (e) => + e.id === entry.id && + e.sourceHost === entry.sourceHost && + e.sourcePort === entry.sourcePort && + e.connectionId === connectionId, ); if (!exists) { this.slowLogEntries.push({ ...entry, connectionId }); @@ -830,25 +932,25 @@ export class MemoryAdapter implements StoragePort { let filtered = [...this.slowLogEntries]; if (options.connectionId) { - filtered = filtered.filter(e => e.connectionId === options.connectionId); + filtered = filtered.filter((e) => e.connectionId === options.connectionId); } if (options.startTime) { - filtered = filtered.filter(e => e.timestamp >= options.startTime!); + filtered = filtered.filter((e) => e.timestamp >= options.startTime!); } if (options.endTime) { - filtered = filtered.filter(e => e.timestamp <= options.endTime!); + filtered = filtered.filter((e) => e.timestamp <= options.endTime!); } if (options.command) { const cmd = options.command.toLowerCase(); // command is an array, check if the first element (command name) matches - filtered = filtered.filter(e => e.command[0]?.toLowerCase().includes(cmd)); + filtered = filtered.filter((e) => e.command[0]?.toLowerCase().includes(cmd)); } if (options.clientName) { const name = options.clientName.toLowerCase(); - filtered = filtered.filter(e => e.clientName.toLowerCase().includes(name)); + filtered = filtered.filter((e) => e.clientName.toLowerCase().includes(name)); } if (options.minDuration) { - filtered = filtered.filter(e => e.duration >= options.minDuration!); + filtered = filtered.filter((e) => e.duration >= options.minDuration!); } return filtered @@ 
-859,28 +961,38 @@ export class MemoryAdapter implements StoragePort { async getLatestSlowLogId(connectionId?: string): Promise { let entries = this.slowLogEntries; if (connectionId) { - entries = entries.filter(e => e.connectionId === connectionId); + entries = entries.filter((e) => e.connectionId === connectionId); } if (entries.length === 0) return null; - return Math.max(...entries.map(e => e.id)); + return Math.max(...entries.map((e) => e.id)); } async pruneOldSlowLogEntries(cutoffTimestamp: number, connectionId?: string): Promise { const before = this.slowLogEntries.length; if (connectionId) { - this.slowLogEntries = this.slowLogEntries.filter(e => e.capturedAt >= cutoffTimestamp || e.connectionId !== connectionId); + this.slowLogEntries = this.slowLogEntries.filter( + (e) => e.capturedAt >= cutoffTimestamp || e.connectionId !== connectionId, + ); } else { - this.slowLogEntries = this.slowLogEntries.filter(e => e.capturedAt >= cutoffTimestamp); + this.slowLogEntries = this.slowLogEntries.filter((e) => e.capturedAt >= cutoffTimestamp); } return before - this.slowLogEntries.length; } // Command Log Methods - async saveCommandLogEntries(entries: StoredCommandLogEntry[], connectionId: string): Promise { + async saveCommandLogEntries( + entries: StoredCommandLogEntry[], + connectionId: string, + ): Promise { let savedCount = 0; for (const entry of entries) { const exists = this.commandLogEntries.some( - e => e.id === entry.id && e.type === entry.type && e.sourceHost === entry.sourceHost && e.sourcePort === entry.sourcePort && e.connectionId === connectionId + (e) => + e.id === entry.id && + e.type === entry.type && + e.sourceHost === entry.sourceHost && + e.sourcePort === entry.sourcePort && + e.connectionId === connectionId, ); if (!exists) { this.commandLogEntries.push({ ...entry, connectionId }); @@ -890,31 +1002,33 @@ export class MemoryAdapter implements StoragePort { return savedCount; } - async getCommandLogEntries(options: CommandLogQueryOptions = {}): 
Promise { + async getCommandLogEntries( + options: CommandLogQueryOptions = {}, + ): Promise { let filtered = [...this.commandLogEntries]; if (options.connectionId) { - filtered = filtered.filter(e => e.connectionId === options.connectionId); + filtered = filtered.filter((e) => e.connectionId === options.connectionId); } if (options.startTime) { - filtered = filtered.filter(e => e.timestamp >= options.startTime!); + filtered = filtered.filter((e) => e.timestamp >= options.startTime!); } if (options.endTime) { - filtered = filtered.filter(e => e.timestamp <= options.endTime!); + filtered = filtered.filter((e) => e.timestamp <= options.endTime!); } if (options.command) { const cmd = options.command.toLowerCase(); - filtered = filtered.filter(e => e.command[0]?.toLowerCase().includes(cmd)); + filtered = filtered.filter((e) => e.command[0]?.toLowerCase().includes(cmd)); } if (options.clientName) { const name = options.clientName.toLowerCase(); - filtered = filtered.filter(e => e.clientName.toLowerCase().includes(name)); + filtered = filtered.filter((e) => e.clientName.toLowerCase().includes(name)); } if (options.type) { - filtered = filtered.filter(e => e.type === options.type); + filtered = filtered.filter((e) => e.type === options.type); } if (options.minDuration) { - filtered = filtered.filter(e => e.duration >= options.minDuration!); + filtered = filtered.filter((e) => e.duration >= options.minDuration!); } return filtered @@ -923,43 +1037,52 @@ export class MemoryAdapter implements StoragePort { } async getLatestCommandLogId(type: CommandLogType, connectionId?: string): Promise { - let entriesOfType = this.commandLogEntries.filter(e => e.type === type); + let entriesOfType = this.commandLogEntries.filter((e) => e.type === type); if (connectionId) { - entriesOfType = entriesOfType.filter(e => e.connectionId === connectionId); + entriesOfType = entriesOfType.filter((e) => e.connectionId === connectionId); } if (entriesOfType.length === 0) return null; - return 
Math.max(...entriesOfType.map(e => e.id)); + return Math.max(...entriesOfType.map((e) => e.id)); } async pruneOldCommandLogEntries(cutoffTimestamp: number, connectionId?: string): Promise { const before = this.commandLogEntries.length; if (connectionId) { - this.commandLogEntries = this.commandLogEntries.filter(e => e.capturedAt >= cutoffTimestamp || e.connectionId !== connectionId); + this.commandLogEntries = this.commandLogEntries.filter( + (e) => e.capturedAt >= cutoffTimestamp || e.connectionId !== connectionId, + ); } else { - this.commandLogEntries = this.commandLogEntries.filter(e => e.capturedAt >= cutoffTimestamp); + this.commandLogEntries = this.commandLogEntries.filter( + (e) => e.capturedAt >= cutoffTimestamp, + ); } return before - this.commandLogEntries.length; } // Latency Snapshot Methods - async saveLatencySnapshots(snapshots: StoredLatencySnapshot[], connectionId: string): Promise { + async saveLatencySnapshots( + snapshots: StoredLatencySnapshot[], + connectionId: string, + ): Promise { for (const snapshot of snapshots) { this.latencySnapshots.push({ ...snapshot, connectionId }); } return snapshots.length; } - async getLatencySnapshots(options: LatencySnapshotQueryOptions = {}): Promise { + async getLatencySnapshots( + options: LatencySnapshotQueryOptions = {}, + ): Promise { let filtered = [...this.latencySnapshots]; if (options.connectionId) { - filtered = filtered.filter(e => e.connectionId === options.connectionId); + filtered = filtered.filter((e) => e.connectionId === options.connectionId); } if (options.startTime) { - filtered = filtered.filter(e => e.timestamp >= options.startTime!); + filtered = filtered.filter((e) => e.timestamp >= options.startTime!); } if (options.endTime) { - filtered = filtered.filter(e => e.timestamp <= options.endTime!); + filtered = filtered.filter((e) => e.timestamp <= options.endTime!); } return filtered @@ -970,56 +1093,71 @@ export class MemoryAdapter implements StoragePort { async 
pruneOldLatencySnapshots(cutoffTimestamp: number, connectionId?: string): Promise { const before = this.latencySnapshots.length; if (connectionId) { - this.latencySnapshots = this.latencySnapshots.filter(e => e.timestamp >= cutoffTimestamp || e.connectionId !== connectionId); + this.latencySnapshots = this.latencySnapshots.filter( + (e) => e.timestamp >= cutoffTimestamp || e.connectionId !== connectionId, + ); } else { - this.latencySnapshots = this.latencySnapshots.filter(e => e.timestamp >= cutoffTimestamp); + this.latencySnapshots = this.latencySnapshots.filter((e) => e.timestamp >= cutoffTimestamp); } return before - this.latencySnapshots.length; } // Latency Histogram Methods - async saveLatencyHistogram(histogram: import('../../common/interfaces/storage-port.interface').StoredLatencyHistogram, connectionId: string): Promise { + async saveLatencyHistogram( + histogram: StoredLatencyHistogram, + connectionId: string, + ): Promise { this.latencyHistograms.push({ ...histogram, connectionId }); return 1; } - async getLatencyHistograms(options: { connectionId?: string; startTime?: number; endTime?: number; limit?: number } = {}): Promise { + async getLatencyHistograms( + options: { connectionId?: string; startTime?: number; endTime?: number; limit?: number } = {}, + ): Promise { let filtered = [...this.latencyHistograms]; - if (options.connectionId) filtered = filtered.filter(e => e.connectionId === options.connectionId); - if (options.startTime) filtered = filtered.filter(e => e.timestamp >= options.startTime!); - if (options.endTime) filtered = filtered.filter(e => e.timestamp <= options.endTime!); + if (options.connectionId) + filtered = filtered.filter((e) => e.connectionId === options.connectionId); + if (options.startTime) filtered = filtered.filter((e) => e.timestamp >= options.startTime!); + if (options.endTime) filtered = filtered.filter((e) => e.timestamp <= options.endTime!); return filtered.sort((a, b) => b.timestamp - a.timestamp).slice(0, 
options.limit ?? 1); } async pruneOldLatencyHistograms(cutoffTimestamp: number, connectionId?: string): Promise { const before = this.latencyHistograms.length; if (connectionId) { - this.latencyHistograms = this.latencyHistograms.filter(e => e.timestamp >= cutoffTimestamp || e.connectionId !== connectionId); + this.latencyHistograms = this.latencyHistograms.filter( + (e) => e.timestamp >= cutoffTimestamp || e.connectionId !== connectionId, + ); } else { - this.latencyHistograms = this.latencyHistograms.filter(e => e.timestamp >= cutoffTimestamp); + this.latencyHistograms = this.latencyHistograms.filter((e) => e.timestamp >= cutoffTimestamp); } return before - this.latencyHistograms.length; } // Memory Snapshot Methods - async saveMemorySnapshots(snapshots: StoredMemorySnapshot[], connectionId: string): Promise { + async saveMemorySnapshots( + snapshots: StoredMemorySnapshot[], + connectionId: string, + ): Promise { for (const snapshot of snapshots) { this.memorySnapshots.push({ ...snapshot, connectionId }); } return snapshots.length; } - async getMemorySnapshots(options: MemorySnapshotQueryOptions = {}): Promise { + async getMemorySnapshots( + options: MemorySnapshotQueryOptions = {}, + ): Promise { let filtered = [...this.memorySnapshots]; if (options.connectionId) { - filtered = filtered.filter(e => e.connectionId === options.connectionId); + filtered = filtered.filter((e) => e.connectionId === options.connectionId); } if (options.startTime) { - filtered = filtered.filter(e => e.timestamp >= options.startTime!); + filtered = filtered.filter((e) => e.timestamp >= options.startTime!); } if (options.endTime) { - filtered = filtered.filter(e => e.timestamp <= options.endTime!); + filtered = filtered.filter((e) => e.timestamp <= options.endTime!); } return filtered @@ -1030,64 +1168,87 @@ export class MemoryAdapter implements StoragePort { async pruneOldMemorySnapshots(cutoffTimestamp: number, connectionId?: string): Promise { const before = 
this.memorySnapshots.length; if (connectionId) { - this.memorySnapshots = this.memorySnapshots.filter(e => e.timestamp >= cutoffTimestamp || e.connectionId !== connectionId); + this.memorySnapshots = this.memorySnapshots.filter( + (e) => e.timestamp >= cutoffTimestamp || e.connectionId !== connectionId, + ); } else { - this.memorySnapshots = this.memorySnapshots.filter(e => e.timestamp >= cutoffTimestamp); + this.memorySnapshots = this.memorySnapshots.filter((e) => e.timestamp >= cutoffTimestamp); } return before - this.memorySnapshots.length; } // Vector Index Snapshot Methods - async saveVectorIndexSnapshots(snapshots: VectorIndexSnapshot[], connectionId: string): Promise { + async saveVectorIndexSnapshots( + snapshots: VectorIndexSnapshot[], + connectionId: string, + ): Promise { for (const snapshot of snapshots) { this.vectorIndexSnapshots.push({ ...snapshot, connectionId }); } return snapshots.length; } - async getVectorIndexSnapshots(options: VectorIndexSnapshotQueryOptions = {}): Promise { + async getVectorIndexSnapshots( + options: VectorIndexSnapshotQueryOptions = {}, + ): Promise { let filtered = [...this.vectorIndexSnapshots]; if (options.connectionId) { - filtered = filtered.filter(e => e.connectionId === options.connectionId); + filtered = filtered.filter((e) => e.connectionId === options.connectionId); } if (options.indexName) { - filtered = filtered.filter(e => e.indexName === options.indexName); + filtered = filtered.filter((e) => e.indexName === options.indexName); } if (options.startTime) { - filtered = filtered.filter(e => e.timestamp >= options.startTime!); + filtered = filtered.filter((e) => e.timestamp >= options.startTime!); } if (options.endTime) { - filtered = filtered.filter(e => e.timestamp <= options.endTime!); + filtered = filtered.filter((e) => e.timestamp <= options.endTime!); } - return filtered - .sort((a, b) => b.timestamp - a.timestamp) - .slice(0, options.limit ?? 
200); + return filtered.sort((a, b) => b.timestamp - a.timestamp).slice(0, options.limit ?? 200); } - async pruneOldVectorIndexSnapshots(cutoffTimestamp: number, connectionId?: string): Promise { + async pruneOldVectorIndexSnapshots( + cutoffTimestamp: number, + connectionId?: string, + ): Promise { const before = this.vectorIndexSnapshots.length; if (connectionId) { - this.vectorIndexSnapshots = this.vectorIndexSnapshots.filter(e => e.timestamp >= cutoffTimestamp || e.connectionId !== connectionId); + this.vectorIndexSnapshots = this.vectorIndexSnapshots.filter( + (e) => e.timestamp >= cutoffTimestamp || e.connectionId !== connectionId, + ); } else { - this.vectorIndexSnapshots = this.vectorIndexSnapshots.filter(e => e.timestamp >= cutoffTimestamp); + this.vectorIndexSnapshots = this.vectorIndexSnapshots.filter( + (e) => e.timestamp >= cutoffTimestamp, + ); } return before - this.vectorIndexSnapshots.length; } // Connection Management Methods (in-memory storage) - private connections: Map = new Map(); - - async saveConnection(config: import('../../common/interfaces/storage-port.interface').DatabaseConnectionConfig): Promise { + private connections: Map< + string, + DatabaseConnectionConfig + > = new Map(); + + async saveConnection( + config: DatabaseConnectionConfig, + ): Promise { this.connections.set(config.id, config); } - async getConnections(): Promise { + async getConnections(): Promise< + DatabaseConnectionConfig[] + > { return Array.from(this.connections.values()).sort((a, b) => a.createdAt - b.createdAt); } - async getConnection(id: string): Promise { + async getConnection( + id: string, + ): Promise< + DatabaseConnectionConfig | null + > { return this.connections.get(id) || null; } @@ -1095,7 +1256,12 @@ export class MemoryAdapter implements StoragePort { this.connections.delete(id); } - async updateConnection(id: string, updates: Partial): Promise { + async updateConnection( + id: string, + updates: Partial< + DatabaseConnectionConfig + >, + ): Promise 
{ const config = this.connections.get(id); if (config) { this.connections.set(id, { ...config, ...updates, updatedAt: Date.now() }); @@ -1104,19 +1270,64 @@ export class MemoryAdapter implements StoragePort { // Agent Token Methods (no-op for non-cloud deployments) - private agentTokens = new Map(); - - async saveAgentToken(token: { id: string; name: string; type: 'agent' | 'mcp'; tokenHash: string; createdAt: number; expiresAt: number; revokedAt: number | null; lastUsedAt: number | null }): Promise { + private agentTokens = new Map< + string, + { + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + } + >(); + + async saveAgentToken(token: { + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + }): Promise { this.agentTokens.set(token.id, token); } - async getAgentTokens(type?: 'agent' | 'mcp'): Promise> { + async getAgentTokens( + type?: 'agent' | 'mcp', + ): Promise< + Array<{ + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + }> + > { let tokens = Array.from(this.agentTokens.values()); - if (type) tokens = tokens.filter(t => t.type === type); + if (type) tokens = tokens.filter((t) => t.type === type); return tokens.sort((a, b) => b.createdAt - a.createdAt); } - async getAgentTokenByHash(hash: string): Promise<{ id: string; name: string; type: 'agent' | 'mcp'; tokenHash: string; createdAt: number; expiresAt: number; revokedAt: number | null; lastUsedAt: number | null } | null> { + async getAgentTokenByHash( + hash: string, + ): Promise<{ + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: 
number | null; + } | null> { for (const token of this.agentTokens.values()) { if (token.tokenHash === hash) return token; } @@ -1136,4 +1347,26 @@ export class MemoryAdapter implements StoragePort { token.lastUsedAt = Date.now(); } } + + // Throughput Forecasting Settings + async getThroughputSettings( + connectionId: string, + ): Promise { + return this.throughputSettings.get(connectionId) ?? null; + } + + async saveThroughputSettings( + settings: ThroughputSettings, + ): Promise { + this.throughputSettings.set(settings.connectionId, settings); + return { ...settings }; + } + + async deleteThroughputSettings(connectionId: string): Promise { + return this.throughputSettings.delete(connectionId); + } + + async getActiveThroughputSettings(): Promise { + return [...this.throughputSettings.values()].filter((s) => s.enabled && s.opsCeiling !== null); + } } diff --git a/apps/api/src/storage/adapters/postgres.adapter.ts b/apps/api/src/storage/adapters/postgres.adapter.ts index 82ad58b3..51eba4b2 100644 --- a/apps/api/src/storage/adapters/postgres.adapter.ts +++ b/apps/api/src/storage/adapters/postgres.adapter.ts @@ -30,13 +30,24 @@ import { LatencySnapshotQueryOptions, StoredMemorySnapshot, MemorySnapshotQueryOptions, + HotKeyEntry, + HotKeyQueryOptions, + StoredLatencyHistogram, + DatabaseConnectionConfig, } from '../../common/interfaces/storage-port.interface'; -import type { VectorIndexSnapshot, VectorIndexSnapshotQueryOptions } from '@betterdb/shared'; +import type { + VectorIndexSnapshot, + VectorIndexSnapshotQueryOptions, + ThroughputSettings, +} from '@betterdb/shared'; import { PostgresDialect, RowMappers } from './base-sql.adapter'; +// TODO: Split into domain-specific repositories (acl, webhooks, anomaly, slowlog, etc.) +// and turn PostgresAdapter into a thin facade that delegates to them. 
+ export interface PostgresAdapterConfig { connectionString: string; - schema?: string; // PostgreSQL schema for tenant isolation + schema?: string; // PostgreSQL schema for tenant isolation } export class PostgresAdapter implements StoragePort { @@ -44,7 +55,7 @@ export class PostgresAdapter implements StoragePort { private ready: boolean = false; private readonly mappers = new RowMappers(PostgresDialect); - constructor(private config: PostgresAdapterConfig) { } + constructor(private config: PostgresAdapterConfig) {} async initialize(): Promise { try { @@ -64,7 +75,7 @@ export class PostgresAdapter implements StoragePort { if (sslCa.startsWith('http://')) { throw new Error( 'Fetching SSL CA certificate over insecure HTTP is not allowed. ' + - 'Use HTTPS or provide a local file path instead.' + 'Use HTTPS or provide a local file path instead.', ); } @@ -72,17 +83,17 @@ export class PostgresAdapter implements StoragePort { // Whitelist of trusted domains for official SSL certificate authorities // Only specific official certificate distribution endpoints, not general cloud storage const trustedDomains = [ - 'truststore.pki.rds.amazonaws.com', // AWS RDS official CA bundle - 'storage.googleapis.com/cloud-sql-ca', // GCP Cloud SQL official path (must check full path) - 'dl.cacerts.digicert.com', // DigiCert CA certificates (used by Azure) - 'cacerts.digicert.com', // DigiCert CA certificates alternate + 'truststore.pki.rds.amazonaws.com', // AWS RDS official CA bundle + 'storage.googleapis.com/cloud-sql-ca', // GCP Cloud SQL official path (must check full path) + 'dl.cacerts.digicert.com', // DigiCert CA certificates (used by Azure) + 'cacerts.digicert.com', // DigiCert CA certificates alternate ]; const url = new URL(sslCa); // Check domain with proper boundary to prevent subdomain spoofing // For path-specific validation (like GCS), also check the path - const isTrustedDomain = trustedDomains.some(domain => { + const isTrustedDomain = trustedDomains.some((domain) => 
{ // Exact hostname match if (url.hostname === domain) { return true; @@ -96,8 +107,10 @@ export class PostgresAdapter implements StoragePort { // Special case for GCS: must have specific path prefix with trailing slash // This prevents /cloud-sql-ca-evil/ from matching if (domain === 'storage.googleapis.com/cloud-sql-ca') { - return url.hostname === 'storage.googleapis.com' && - url.pathname.startsWith('/cloud-sql-ca/'); + return ( + url.hostname === 'storage.googleapis.com' && + url.pathname.startsWith('/cloud-sql-ca/') + ); } return false; @@ -106,8 +119,8 @@ export class PostgresAdapter implements StoragePort { if (!isTrustedDomain) { throw new Error( `SSL certificate fetching blocked: ${url.hostname} is not in the trusted domains list. ` + - `Trusted domains: ${trustedDomains.join(', ')}. ` + - `Use a local file path or a trusted cloud provider URL.` + `Trusted domains: ${trustedDomains.join(', ')}. ` + + `Use a local file path or a trusted cloud provider URL.`, ); } @@ -121,7 +134,7 @@ export class PostgresAdapter implements StoragePort { const response = await fetch(sslCa, { signal: controller.signal, - redirect: 'error' // Prevent redirect bypass of domain whitelist + redirect: 'error', // Prevent redirect bypass of domain whitelist }); clearTimeout(timeoutId); @@ -357,7 +370,11 @@ export class PostgresAdapter implements StoragePort { return result.rows.map((row) => this.mappers.mapAclEntryRow(row)); } - async getAuditStats(startTime?: number, endTime?: number, connectionId?: string): Promise { + async getAuditStats( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise { if (!this.pool) { throw new Error('Database not initialized'); } @@ -421,9 +438,9 @@ export class PostgresAdapter implements StoragePort { const timeRange = timeRangeResult.rows[0].earliest !== null && timeRangeResult.rows[0].latest !== null ? 
{ - earliest: parseInt(timeRangeResult.rows[0].earliest), - latest: parseInt(timeRangeResult.rows[0].latest), - } + earliest: parseInt(timeRangeResult.rows[0].earliest), + latest: parseInt(timeRangeResult.rows[0].latest), + } : null; return { @@ -443,7 +460,7 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'DELETE FROM acl_audit WHERE captured_at < $1 AND connection_id = $2', - [olderThanTimestamp, connectionId] + [olderThanTimestamp, connectionId], ); return result.rowCount || 0; } @@ -509,7 +526,9 @@ export class PostgresAdapter implements StoragePort { return clients.length; } - async getClientSnapshots(options: ClientSnapshotQueryOptions = {}): Promise { + async getClientSnapshots( + options: ClientSnapshotQueryOptions = {}, + ): Promise { if (!this.pool) { throw new Error('Database not initialized'); } @@ -632,7 +651,11 @@ export class PostgresAdapter implements StoragePort { return Array.from(pointsMap.values()).sort((a, b) => a.timestamp - b.timestamp); } - async getClientAnalyticsStats(startTime?: number, endTime?: number, connectionId?: string): Promise { + async getClientAnalyticsStats( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise { if (!this.pool) { throw new Error('Database not initialized'); } @@ -665,9 +688,12 @@ export class PostgresAdapter implements StoragePort { ); const latestTimestamp = latestResult.rows[0].latest; - const currentConditions = latestTimestamp ? [...conditions, `captured_at = $${paramIndex++}`] : conditions; + const currentConditions = latestTimestamp + ? [...conditions, `captured_at = $${paramIndex++}`] + : conditions; const currentParams = latestTimestamp ? [...params, latestTimestamp] : params; - const currentWhereClause = currentConditions.length > 0 ? `WHERE ${currentConditions.join(' AND ')}` : ''; + const currentWhereClause = + currentConditions.length > 0 ? 
`WHERE ${currentConditions.join(' AND ')}` : ''; const currentConnectionsResult = await this.pool.query( `SELECT COUNT(*) as count FROM client_snapshots ${currentWhereClause}`, @@ -791,7 +817,10 @@ export class PostgresAdapter implements StoragePort { params, ); - const connectionsByUserAndName: Record = {}; + const connectionsByUserAndName: Record< + string, + { user: string; name: string; current: number; peak: number; avgAge: number } + > = {}; for (const row of byUserAndNameResult.rows) { const key = `${row.user_name}:${row.name}`; @@ -836,9 +865,9 @@ export class PostgresAdapter implements StoragePort { const timeRange = timeRangeResult.rows[0].earliest !== null && timeRangeResult.rows[0].latest !== null ? { - earliest: parseInt(timeRangeResult.rows[0].earliest), - latest: parseInt(timeRangeResult.rows[0].latest), - } + earliest: parseInt(timeRangeResult.rows[0].earliest), + latest: parseInt(timeRangeResult.rows[0].latest), + } : null; return { @@ -911,7 +940,10 @@ export class PostgresAdapter implements StoragePort { return result.rows.map((row) => this.mappers.mapClientRow(row)); } - async pruneOldClientSnapshots(olderThanTimestamp: number, connectionId?: string): Promise { + async pruneOldClientSnapshots( + olderThanTimestamp: number, + connectionId?: string, + ): Promise { if (!this.pool) { throw new Error('Database not initialized'); } @@ -919,7 +951,7 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'DELETE FROM client_snapshots WHERE captured_at < $1 AND connection_id = $2', - [olderThanTimestamp, connectionId] + [olderThanTimestamp, connectionId], ); return result.rowCount || 0; } @@ -1126,10 +1158,27 @@ export class PostgresAdapter implements StoragePort { anomaly_poll_interval_ms INTEGER NOT NULL DEFAULT 1000, anomaly_cache_ttl_ms INTEGER NOT NULL DEFAULT 3600000, anomaly_prometheus_interval_ms INTEGER NOT NULL DEFAULT 30000, + throughput_forecasting_enabled BOOLEAN NOT NULL DEFAULT 
true, + throughput_forecasting_default_rolling_window_ms INTEGER NOT NULL DEFAULT 21600000, + throughput_forecasting_default_alert_threshold_ms INTEGER NOT NULL DEFAULT 7200000, updated_at BIGINT NOT NULL DEFAULT (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT, created_at BIGINT NOT NULL DEFAULT (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT ); + -- Migration: add throughput-forecasting columns if they don't exist + ALTER TABLE app_settings ADD COLUMN IF NOT EXISTS throughput_forecasting_enabled BOOLEAN NOT NULL DEFAULT true; + ALTER TABLE app_settings ADD COLUMN IF NOT EXISTS throughput_forecasting_default_rolling_window_ms INTEGER NOT NULL DEFAULT 21600000; + ALTER TABLE app_settings ADD COLUMN IF NOT EXISTS throughput_forecasting_default_alert_threshold_ms INTEGER NOT NULL DEFAULT 7200000; + + CREATE TABLE IF NOT EXISTS throughput_settings ( + connection_id TEXT PRIMARY KEY, + enabled BOOLEAN NOT NULL DEFAULT true, + ops_ceiling INTEGER, + rolling_window_ms INTEGER NOT NULL DEFAULT 21600000, + alert_threshold_ms INTEGER NOT NULL DEFAULT 7200000, + updated_at BIGINT NOT NULL + ); + CREATE TABLE IF NOT EXISTS webhooks ( id UUID PRIMARY KEY DEFAULT gen_random_uuid(), name VARCHAR(255) NOT NULL, @@ -1439,7 +1488,7 @@ export class PostgresAdapter implements StoragePort { event.sourceHost || null, event.sourcePort || null, connectionId, - ] + ], ); return event.id; @@ -1496,7 +1545,7 @@ export class PostgresAdapter implements StoragePort { resolved = EXCLUDED.resolved, resolved_at = EXCLUDED.resolved_at, duration_ms = EXCLUDED.duration_ms`, - values + values, ); return result.rowCount ?? 
0; @@ -1549,13 +1598,17 @@ export class PostgresAdapter implements StoragePort { ${whereClause} ORDER BY timestamp DESC LIMIT $${paramIndex++} OFFSET $${paramIndex++}`, - [...params, limit, offset] + [...params, limit, offset], ); return result.rows.map((row) => this.mappers.mapAnomalyEventRow(row)); } - async getAnomalyStats(startTime?: number, endTime?: number, connectionId?: string): Promise { + async getAnomalyStats( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -1579,30 +1632,31 @@ export class PostgresAdapter implements StoragePort { const totalResult = await this.pool.query( `SELECT COUNT(*) as total FROM anomaly_events ${whereClause}`, - params + params, ); const severityResult = await this.pool.query( `SELECT severity, COUNT(*) as count FROM anomaly_events ${whereClause} GROUP BY severity`, - params + params, ); const metricResult = await this.pool.query( `SELECT metric_type, COUNT(*) as count FROM anomaly_events ${whereClause} GROUP BY metric_type`, - params + params, ); const unresolvedConditions = [...conditions]; if (unresolvedConditions.length > 0) { unresolvedConditions.push(`resolved = false`); } - const unresolvedWhereClause = unresolvedConditions.length > 0 - ? `WHERE ${unresolvedConditions.join(' AND ')}` - : 'WHERE resolved = false'; + const unresolvedWhereClause = + unresolvedConditions.length > 0 + ? `WHERE ${unresolvedConditions.join(' AND ')}` + : 'WHERE resolved = false'; const unresolvedResult = await this.pool.query( `SELECT COUNT(*) as count FROM anomaly_events ${unresolvedWhereClause}`, - params + params, ); const bySeverity: Record = {}; @@ -1631,7 +1685,7 @@ export class PostgresAdapter implements StoragePort { `UPDATE anomaly_events SET resolved = true, resolved_at = $2, duration_ms = $2 - timestamp WHERE id = $1 AND resolved = false`, - [id, resolvedAt] + [id, resolvedAt], ); return (result.rowCount ?? 
0) > 0; @@ -1643,15 +1697,14 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'DELETE FROM anomaly_events WHERE timestamp < $1 AND connection_id = $2', - [cutoffTimestamp, connectionId] + [cutoffTimestamp, connectionId], ); return result.rowCount ?? 0; } - const result = await this.pool.query( - 'DELETE FROM anomaly_events WHERE timestamp < $1', - [cutoffTimestamp] - ); + const result = await this.pool.query('DELETE FROM anomaly_events WHERE timestamp < $1', [ + cutoffTimestamp, + ]); return result.rowCount ?? 0; } @@ -1681,7 +1734,7 @@ export class PostgresAdapter implements StoragePort { group.sourceHost || null, group.sourcePort || null, connectionId, - ] + ], ); return group.correlationId; @@ -1728,7 +1781,7 @@ export class PostgresAdapter implements StoragePort { ${whereClause} ORDER BY timestamp DESC LIMIT $${paramIndex++} OFFSET $${paramIndex++}`, - [...params, limit, offset] + [...params, limit, offset], ); return result.rows.map((row) => this.mappers.mapCorrelatedGroupRow(row)); @@ -1740,20 +1793,23 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'DELETE FROM correlated_anomaly_groups WHERE timestamp < $1 AND connection_id = $2', - [cutoffTimestamp, connectionId] + [cutoffTimestamp, connectionId], ); return result.rowCount ?? 0; } const result = await this.pool.query( 'DELETE FROM correlated_anomaly_groups WHERE timestamp < $1', - [cutoffTimestamp] + [cutoffTimestamp], ); return result.rowCount ?? 
0; } - async saveKeyPatternSnapshots(snapshots: KeyPatternSnapshot[], connectionId: string): Promise { + async saveKeyPatternSnapshots( + snapshots: KeyPatternSnapshot[], + connectionId: string, + ): Promise { if (!this.pool || snapshots.length === 0) return 0; const values: string[] = []; @@ -1761,7 +1817,9 @@ export class PostgresAdapter implements StoragePort { let paramIndex = 1; for (const snapshot of snapshots) { - values.push(`($${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++})`); + values.push( + `($${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++})`, + ); params.push( snapshot.id, snapshot.timestamp, @@ -1785,7 +1843,8 @@ export class PostgresAdapter implements StoragePort { ); } - await this.pool.query(` + await this.pool.query( + ` INSERT INTO key_pattern_snapshots ( id, timestamp, pattern, key_count, sampled_key_count, keys_with_ttl, keys_expiring_soon, total_memory_bytes, @@ -1793,12 +1852,16 @@ export class PostgresAdapter implements StoragePort { hot_key_count, cold_key_count, avg_idle_time_seconds, stale_key_count, avg_ttl_seconds, min_ttl_seconds, max_ttl_seconds, connection_id ) VALUES ${values.join(', ')} - `, params); + `, + params, + ); return snapshots.length; } - async getKeyPatternSnapshots(options: KeyPatternQueryOptions = {}): Promise { + async getKeyPatternSnapshots( + options: KeyPatternQueryOptions = {}, + ): Promise { if (!this.pool) throw new Error('Database not 
initialized'); const conditions: string[] = []; @@ -1826,17 +1889,24 @@ export class PostgresAdapter implements StoragePort { const limit = options.limit ?? 100; const offset = options.offset ?? 0; - const result = await this.pool.query(` + const result = await this.pool.query( + ` SELECT * FROM key_pattern_snapshots ${whereClause} ORDER BY timestamp DESC LIMIT $${paramIndex++} OFFSET $${paramIndex++} - `, [...params, limit, offset]); + `, + [...params, limit, offset], + ); return result.rows.map((row) => this.mappers.mapKeyPatternSnapshotRow(row)); } - async getKeyAnalyticsSummary(startTime?: number, endTime?: number, connectionId?: string): Promise { + async getKeyAnalyticsSummary( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -1858,25 +1928,33 @@ export class PostgresAdapter implements StoragePort { const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : ''; - const latestSnapshotsResult = await this.pool.query(` + const latestSnapshotsResult = await this.pool.query( + ` SELECT pattern, MAX(timestamp) as latest_timestamp FROM key_pattern_snapshots ${whereClause} GROUP BY pattern - `, params); + `, + params, + ); if (latestSnapshotsResult.rows.length === 0) return null; - const patternConditions = latestSnapshotsResult.rows.map(() => '(pattern = ? AND timestamp = ?)').join(' OR '); + const patternConditions = latestSnapshotsResult.rows + .map(() => '(pattern = ? 
AND timestamp = ?)') + .join(' OR '); const patternParams: any[] = []; for (const row of latestSnapshotsResult.rows) { patternParams.push(row.pattern, row.latest_timestamp); } let pIdx = 1; - const patternPlaceholders = latestSnapshotsResult.rows.map(() => `(pattern = $${pIdx++} AND timestamp = $${pIdx++})`).join(' OR '); + const patternPlaceholders = latestSnapshotsResult.rows + .map(() => `(pattern = $${pIdx++} AND timestamp = $${pIdx++})`) + .join(' OR '); - const summaryResult = await this.pool.query(` + const summaryResult = await this.pool.query( + ` SELECT COUNT(DISTINCT pattern) as total_patterns, SUM(key_count) as total_keys, @@ -1887,15 +1965,20 @@ export class PostgresAdapter implements StoragePort { SUM(keys_expiring_soon) as keys_expiring_soon FROM key_pattern_snapshots WHERE ${patternPlaceholders} - `, patternParams); + `, + patternParams, + ); const summary = summaryResult.rows[0]; - const patternRowsResult = await this.pool.query(` + const patternRowsResult = await this.pool.query( + ` SELECT pattern, key_count, total_memory_bytes, avg_memory_bytes, stale_key_count, hot_key_count, cold_key_count FROM key_pattern_snapshots WHERE ${patternPlaceholders} - `, patternParams); + `, + patternParams, + ); const byPattern: Record = {}; for (const row of patternRowsResult.rows) { @@ -1909,14 +1992,21 @@ export class PostgresAdapter implements StoragePort { }; } - const timeRangeResult = await this.pool.query(` + const timeRangeResult = await this.pool.query( + ` SELECT MIN(timestamp) as earliest, MAX(timestamp) as latest FROM key_pattern_snapshots ${whereClause} - `, params); + `, + params, + ); - const timeRange = timeRangeResult.rows[0].earliest !== null && timeRangeResult.rows[0].latest !== null - ? { earliest: parseInt(timeRangeResult.rows[0].earliest), latest: parseInt(timeRangeResult.rows[0].latest) } - : null; + const timeRange = + timeRangeResult.rows[0].earliest !== null && timeRangeResult.rows[0].latest !== null + ? 
{ + earliest: parseInt(timeRangeResult.rows[0].earliest), + latest: parseInt(timeRangeResult.rows[0].latest), + } + : null; return { totalPatterns: parseInt(summary.total_patterns) || 0, @@ -1931,12 +2021,19 @@ export class PostgresAdapter implements StoragePort { }; } - async getKeyPatternTrends(pattern: string, startTime: number, endTime: number, connectionId?: string): Promise> { + async getKeyPatternTrends( + pattern: string, + startTime: number, + endTime: number, + connectionId?: string, + ): Promise< + Array<{ + timestamp: number; + keyCount: number; + memoryBytes: number; + staleCount: number; + }> + > { if (!this.pool) throw new Error('Database not initialized'); const conditions = ['pattern = $1', 'timestamp >= $2', 'timestamp <= $3']; @@ -1947,14 +2044,17 @@ export class PostgresAdapter implements StoragePort { params.push(connectionId); } - const result = await this.pool.query(` + const result = await this.pool.query( + ` SELECT timestamp, key_count, total_memory_bytes, stale_key_count FROM key_pattern_snapshots WHERE ${conditions.join(' AND ')} ORDER BY timestamp ASC - `, params); + `, + params, + ); - return result.rows.map(row => ({ + return result.rows.map((row) => ({ timestamp: parseInt(row.timestamp), keyCount: row.key_count, memoryBytes: parseInt(row.total_memory_bytes), @@ -1962,26 +2062,28 @@ export class PostgresAdapter implements StoragePort { })); } - async pruneOldKeyPatternSnapshots(cutoffTimestamp: number, connectionId?: string): Promise { + async pruneOldKeyPatternSnapshots( + cutoffTimestamp: number, + connectionId?: string, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); if (connectionId) { const result = await this.pool.query( 'DELETE FROM key_pattern_snapshots WHERE timestamp < $1 AND connection_id = $2', - [cutoffTimestamp, connectionId] + [cutoffTimestamp, connectionId], ); return result.rowCount ?? 
0; } - const result = await this.pool.query( - 'DELETE FROM key_pattern_snapshots WHERE timestamp < $1', - [cutoffTimestamp] - ); + const result = await this.pool.query('DELETE FROM key_pattern_snapshots WHERE timestamp < $1', [ + cutoffTimestamp, + ]); return result.rowCount ?? 0; } - async saveHotKeys(entries: import('../../common/interfaces/storage-port.interface').HotKeyEntry[], connectionId: string): Promise { + async saveHotKeys(entries: HotKeyEntry[], connectionId: string): Promise { if (!this.pool || entries.length === 0) return 0; const values: string[] = []; @@ -1989,7 +2091,9 @@ export class PostgresAdapter implements StoragePort { let paramIndex = 1; for (const entry of entries) { - values.push(`($${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++})`); + values.push( + `($${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++})`, + ); params.push( entry.id, entry.keyName, @@ -2004,17 +2108,20 @@ export class PostgresAdapter implements StoragePort { ); } - await this.pool.query(` + await this.pool.query( + ` INSERT INTO hot_key_stats ( id, key_name, connection_id, captured_at, signal_type, freq_score, idle_seconds, memory_bytes, ttl, rank ) VALUES ${values.join(', ')} - `, params); + `, + params, + ); return entries.length; } - async getHotKeys(options: import('../../common/interfaces/storage-port.interface').HotKeyQueryOptions = {}): Promise { + async getHotKeys(options: HotKeyQueryOptions = {}): Promise { if (!this.pool) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -2056,14 +2163,17 @@ export class PostgresAdapter implements StoragePort { const limit = options.limit ?? 50; const offset = options.offset ?? 
0; - const result = await this.pool.query(` + const result = await this.pool.query( + ` SELECT id, key_name, connection_id, captured_at, signal_type, freq_score, idle_seconds, memory_bytes, ttl, rank FROM hot_key_stats ${whereClause} ORDER BY captured_at DESC, rank ASC LIMIT $${paramIndex++} OFFSET $${paramIndex++} - `, [...params, limit, offset]); + `, + [...params, limit, offset], + ); return result.rows.map((row: any) => ({ id: row.id, @@ -2085,15 +2195,14 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'DELETE FROM hot_key_stats WHERE captured_at < $1 AND connection_id = $2', - [cutoffTimestamp, connectionId] + [cutoffTimestamp, connectionId], ); return result.rowCount ?? 0; } - const result = await this.pool.query( - 'DELETE FROM hot_key_stats WHERE captured_at < $1', - [cutoffTimestamp] - ); + const result = await this.pool.query('DELETE FROM hot_key_stats WHERE captured_at < $1', [ + cutoffTimestamp, + ]); return result.rowCount ?? 
0; } @@ -2117,14 +2226,18 @@ export class PostgresAdapter implements StoragePort { `INSERT INTO app_settings ( id, audit_poll_interval_ms, client_analytics_poll_interval_ms, anomaly_poll_interval_ms, anomaly_cache_ttl_ms, anomaly_prometheus_interval_ms, + throughput_forecasting_enabled, throughput_forecasting_default_rolling_window_ms, throughput_forecasting_default_alert_threshold_ms, updated_at, created_at - ) VALUES (1, $1, $2, $3, $4, $5, $6, $7) + ) VALUES (1, $1, $2, $3, $4, $5, $6, $7, $8, $9, $10) ON CONFLICT(id) DO UPDATE SET audit_poll_interval_ms = EXCLUDED.audit_poll_interval_ms, client_analytics_poll_interval_ms = EXCLUDED.client_analytics_poll_interval_ms, anomaly_poll_interval_ms = EXCLUDED.anomaly_poll_interval_ms, anomaly_cache_ttl_ms = EXCLUDED.anomaly_cache_ttl_ms, anomaly_prometheus_interval_ms = EXCLUDED.anomaly_prometheus_interval_ms, + throughput_forecasting_enabled = EXCLUDED.throughput_forecasting_enabled, + throughput_forecasting_default_rolling_window_ms = EXCLUDED.throughput_forecasting_default_rolling_window_ms, + throughput_forecasting_default_alert_threshold_ms = EXCLUDED.throughput_forecasting_default_alert_threshold_ms, updated_at = EXCLUDED.updated_at`, [ settings.auditPollIntervalMs, @@ -2132,9 +2245,12 @@ export class PostgresAdapter implements StoragePort { settings.anomalyPollIntervalMs, settings.anomalyCacheTtlMs, settings.anomalyPrometheusIntervalMs, + settings.throughputForecastingEnabled, + settings.throughputForecastingDefaultRollingWindowMs, + settings.throughputForecastingDefaultAlertThresholdMs, now, - settings.createdAt || now - ] + settings.createdAt || now, + ], ); const saved = await this.getSettings(); @@ -2180,7 +2296,7 @@ export class PostgresAdapter implements StoragePort { webhook.alertConfig ? JSON.stringify(webhook.alertConfig) : null, webhook.thresholds ? 
JSON.stringify(webhook.thresholds) : null, webhook.connectionId || null, - ] + ], ); return this.mappers.mapWebhookRow(result.rows[0]); @@ -2201,13 +2317,15 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'SELECT * FROM webhooks WHERE connection_id = $1 OR connection_id IS NULL ORDER BY created_at DESC', - [connectionId] + [connectionId], ); return result.rows.map((row) => this.mappers.mapWebhookRow(row)); } // No connectionId provided - only return global webhooks (not scoped to any connection) - const result = await this.pool.query('SELECT * FROM webhooks WHERE connection_id IS NULL ORDER BY created_at DESC'); + const result = await this.pool.query( + 'SELECT * FROM webhooks WHERE connection_id IS NULL ORDER BY created_at DESC', + ); return result.rows.map((row) => this.mappers.mapWebhookRow(row)); } @@ -2218,7 +2336,7 @@ export class PostgresAdapter implements StoragePort { // Return webhooks scoped to this connection OR global webhooks (no connectionId) const result = await this.pool.query( 'SELECT * FROM webhooks WHERE enabled = true AND $1 = ANY(events) AND (connection_id = $2 OR connection_id IS NULL)', - [event, connectionId] + [event, connectionId], ); return result.rows.map((row) => this.mappers.mapWebhookRow(row)); } @@ -2226,13 +2344,16 @@ export class PostgresAdapter implements StoragePort { // No connectionId provided - only return global webhooks (not scoped to any connection) const result = await this.pool.query( 'SELECT * FROM webhooks WHERE enabled = true AND $1 = ANY(events) AND connection_id IS NULL', - [event] + [event], ); return result.rows.map((row) => this.mappers.mapWebhookRow(row)); } - async updateWebhook(id: string, updates: Partial>): Promise { + async updateWebhook( + id: string, + updates: Partial>, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); const setClauses: string[] = []; @@ -2293,7 +2414,7 @@ export class PostgresAdapter implements 
StoragePort { const result = await this.pool.query( `UPDATE webhooks SET ${setClauses.join(', ')} WHERE id = $${paramIndex} RETURNING *`, - params + params, ); if (result.rows.length === 0) return null; @@ -2308,7 +2429,9 @@ export class PostgresAdapter implements StoragePort { return (result.rowCount ?? 0) > 0; } - async createDelivery(delivery: Omit): Promise { + async createDelivery( + delivery: Omit, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); const result = await this.pool.query( @@ -2330,7 +2453,7 @@ export class PostgresAdapter implements StoragePort { delivery.completedAt ? new Date(delivery.completedAt) : null, delivery.durationMs || null, delivery.connectionId || null, - ] + ], ); return this.mappers.mapDeliveryRow(result.rows[0]); @@ -2345,18 +2468,25 @@ export class PostgresAdapter implements StoragePort { return this.mappers.mapDeliveryRow(result.rows[0]); } - async getDeliveriesByWebhook(webhookId: string, limit: number = 50, offset: number = 0): Promise { + async getDeliveriesByWebhook( + webhookId: string, + limit: number = 50, + offset: number = 0, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); const result = await this.pool.query( 'SELECT * FROM webhook_deliveries WHERE webhook_id = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3', - [webhookId, limit, offset] + [webhookId, limit, offset], ); return result.rows.map((row) => this.mappers.mapDeliveryRow(row)); } - async updateDelivery(id: string, updates: Partial>): Promise { + async updateDelivery( + id: string, + updates: Partial>, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); const setClauses: string[] = []; @@ -2398,13 +2528,16 @@ export class PostgresAdapter implements StoragePort { const result = await this.pool.query( `UPDATE webhook_deliveries SET ${setClauses.join(', ')} WHERE id = $${paramIndex}`, - params + params, ); return (result.rowCount ?? 
0) > 0; } - async getRetriableDeliveries(limit: number = 100, connectionId?: string): Promise { + async getRetriableDeliveries( + limit: number = 100, + connectionId?: string, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); if (connectionId) { @@ -2416,7 +2549,7 @@ export class PostgresAdapter implements StoragePort { ORDER BY next_retry_at ASC LIMIT $1 FOR UPDATE SKIP LOCKED`, - [limit, connectionId] + [limit, connectionId], ); return result.rows.map((row) => this.mappers.mapDeliveryRow(row)); } @@ -2428,7 +2561,7 @@ export class PostgresAdapter implements StoragePort { ORDER BY next_retry_at ASC LIMIT $1 FOR UPDATE SKIP LOCKED`, - [limit] + [limit], ); return result.rows.map((row) => this.mappers.mapDeliveryRow(row)); @@ -2440,14 +2573,14 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'DELETE FROM webhook_deliveries WHERE EXTRACT(EPOCH FROM created_at) * 1000 < $1 AND connection_id = $2', - [cutoffTimestamp, connectionId] + [cutoffTimestamp, connectionId], ); return result.rowCount ?? 0; } const result = await this.pool.query( 'DELETE FROM webhook_deliveries WHERE EXTRACT(EPOCH FROM created_at) * 1000 < $1', - [cutoffTimestamp] + [cutoffTimestamp], ); return result.rowCount ?? 
0; @@ -2470,7 +2603,7 @@ export class PostgresAdapter implements StoragePort { entry.id, entry.timestamp, entry.duration, - entry.command, // PostgreSQL will accept string[] for TEXT[] + entry.command, // PostgreSQL will accept string[] for TEXT[] entry.clientAddress || '', entry.clientName || '', entry.capturedAt, @@ -2537,7 +2670,7 @@ export class PostgresAdapter implements StoragePort { ${whereClause} ORDER BY timestamp DESC LIMIT $${paramIndex++} OFFSET $${paramIndex++}`, - [...params, limit, offset] + [...params, limit, offset], ); return result.rows.map((row) => this.mappers.mapSlowLogEntryRow(row)); @@ -2549,15 +2682,13 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'SELECT MAX(slowlog_id) as max_id FROM slow_log_entries WHERE connection_id = $1', - [connectionId] + [connectionId], ); const maxId = result.rows[0]?.max_id; return maxId !== null && maxId !== undefined ? Number(maxId) : null; } - const result = await this.pool.query( - 'SELECT MAX(slowlog_id) as max_id FROM slow_log_entries' - ); + const result = await this.pool.query('SELECT MAX(slowlog_id) as max_id FROM slow_log_entries'); const maxId = result.rows[0]?.max_id; return maxId !== null && maxId !== undefined ? Number(maxId) : null; @@ -2569,21 +2700,23 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'DELETE FROM slow_log_entries WHERE captured_at < $1 AND connection_id = $2', - [cutoffTimestamp, connectionId] + [cutoffTimestamp, connectionId], ); return result.rowCount ?? 0; } - const result = await this.pool.query( - 'DELETE FROM slow_log_entries WHERE captured_at < $1', - [cutoffTimestamp] - ); + const result = await this.pool.query('DELETE FROM slow_log_entries WHERE captured_at < $1', [ + cutoffTimestamp, + ]); return result.rowCount ?? 
0; } // Command Log Methods (Valkey-specific) - async saveCommandLogEntries(entries: StoredCommandLogEntry[], connectionId: string): Promise { + async saveCommandLogEntries( + entries: StoredCommandLogEntry[], + connectionId: string, + ): Promise { if (!this.pool || entries.length === 0) return 0; const values: any[] = []; @@ -2623,7 +2756,9 @@ export class PostgresAdapter implements StoragePort { return result.rowCount ?? 0; } - async getCommandLogEntries(options: CommandLogQueryOptions = {}): Promise { + async getCommandLogEntries( + options: CommandLogQueryOptions = {}, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -2671,7 +2806,7 @@ export class PostgresAdapter implements StoragePort { ${whereClause} ORDER BY timestamp DESC LIMIT $${paramIndex++} OFFSET $${paramIndex++}`, - [...params, limit, offset] + [...params, limit, offset], ); return result.rows.map((row) => this.mappers.mapCommandLogEntryRow(row)); @@ -2683,7 +2818,7 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'SELECT MAX(commandlog_id) as max_id FROM command_log_entries WHERE log_type = $1 AND connection_id = $2', - [type, connectionId] + [type, connectionId], ); const maxId = result.rows[0]?.max_id; return maxId !== null && maxId !== undefined ? Number(maxId) : null; @@ -2691,7 +2826,7 @@ export class PostgresAdapter implements StoragePort { const result = await this.pool.query( 'SELECT MAX(commandlog_id) as max_id FROM command_log_entries WHERE log_type = $1', - [type] + [type], ); const maxId = result.rows[0]?.max_id; @@ -2704,21 +2839,23 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'DELETE FROM command_log_entries WHERE captured_at < $1 AND connection_id = $2', - [cutoffTimestamp, connectionId] + [cutoffTimestamp, connectionId], ); return result.rowCount ?? 
0; } - const result = await this.pool.query( - 'DELETE FROM command_log_entries WHERE captured_at < $1', - [cutoffTimestamp] - ); + const result = await this.pool.query('DELETE FROM command_log_entries WHERE captured_at < $1', [ + cutoffTimestamp, + ]); return result.rowCount ?? 0; } // Latency Snapshot Methods - async saveLatencySnapshots(snapshots: StoredLatencySnapshot[], connectionId: string): Promise { + async saveLatencySnapshots( + snapshots: StoredLatencySnapshot[], + connectionId: string, + ): Promise { if (!this.pool || snapshots.length === 0) return 0; const values: any[] = []; @@ -2749,7 +2886,9 @@ export class PostgresAdapter implements StoragePort { return result.rowCount ?? 0; } - async getLatencySnapshots(options: LatencySnapshotQueryOptions = {}): Promise { + async getLatencySnapshots( + options: LatencySnapshotQueryOptions = {}, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -2799,20 +2938,22 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'DELETE FROM latency_snapshots WHERE timestamp < $1 AND connection_id = $2', - [cutoffTimestamp, connectionId] + [cutoffTimestamp, connectionId], ); return result.rowCount ?? 0; } - const result = await this.pool.query( - 'DELETE FROM latency_snapshots WHERE timestamp < $1', - [cutoffTimestamp] - ); + const result = await this.pool.query('DELETE FROM latency_snapshots WHERE timestamp < $1', [ + cutoffTimestamp, + ]); return result.rowCount ?? 
0; } // Latency Histogram Methods - async saveLatencyHistogram(histogram: import('../../common/interfaces/storage-port.interface').StoredLatencyHistogram, connectionId: string): Promise { + async saveLatencyHistogram( + histogram: StoredLatencyHistogram, + connectionId: string, + ): Promise { if (!this.pool) return 0; const result = await this.pool.query( @@ -2823,7 +2964,9 @@ export class PostgresAdapter implements StoragePort { return result.rowCount ?? 0; } - async getLatencyHistograms(options: { connectionId?: string; startTime?: number; endTime?: number; limit?: number } = {}): Promise { + async getLatencyHistograms( + options: { connectionId?: string; startTime?: number; endTime?: number; limit?: number } = {}, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -2859,7 +3002,10 @@ export class PostgresAdapter implements StoragePort { return result.rows.map((row: any) => ({ id: row.id, timestamp: Number(row.timestamp), - data: typeof row.histogram_data === 'string' ? JSON.parse(row.histogram_data) : row.histogram_data, + data: + typeof row.histogram_data === 'string' + ? JSON.parse(row.histogram_data) + : row.histogram_data, connectionId: row.connection_id, })); } @@ -2870,20 +3016,22 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'DELETE FROM latency_histograms WHERE timestamp < $1 AND connection_id = $2', - [cutoffTimestamp, connectionId] + [cutoffTimestamp, connectionId], ); return result.rowCount ?? 0; } - const result = await this.pool.query( - 'DELETE FROM latency_histograms WHERE timestamp < $1', - [cutoffTimestamp] - ); + const result = await this.pool.query('DELETE FROM latency_histograms WHERE timestamp < $1', [ + cutoffTimestamp, + ]); return result.rowCount ?? 
0; } // Memory Snapshot Methods - async saveMemorySnapshots(snapshots: StoredMemorySnapshot[], connectionId: string): Promise { + async saveMemorySnapshots( + snapshots: StoredMemorySnapshot[], + connectionId: string, + ): Promise { if (!this.pool || snapshots.length === 0) return 0; const values: any[] = []; @@ -2927,7 +3075,9 @@ export class PostgresAdapter implements StoragePort { return result.rowCount ?? 0; } - async getMemorySnapshots(options: MemorySnapshotQueryOptions = {}): Promise { + async getMemorySnapshots( + options: MemorySnapshotQueryOptions = {}, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -2987,20 +3137,22 @@ export class PostgresAdapter implements StoragePort { if (connectionId) { const result = await this.pool.query( 'DELETE FROM memory_snapshots WHERE timestamp < $1 AND connection_id = $2', - [cutoffTimestamp, connectionId] + [cutoffTimestamp, connectionId], ); return result.rowCount ?? 0; } - const result = await this.pool.query( - 'DELETE FROM memory_snapshots WHERE timestamp < $1', - [cutoffTimestamp] - ); + const result = await this.pool.query('DELETE FROM memory_snapshots WHERE timestamp < $1', [ + cutoffTimestamp, + ]); return result.rowCount ?? 
0; } // Vector Index Snapshot Methods - async saveVectorIndexSnapshots(snapshots: VectorIndexSnapshot[], connectionId: string): Promise { + async saveVectorIndexSnapshots( + snapshots: VectorIndexSnapshot[], + connectionId: string, + ): Promise { if (!this.pool || snapshots.length === 0) return 0; const values: any[] = []; @@ -3008,7 +3160,9 @@ export class PostgresAdapter implements StoragePort { let paramIndex = 1; for (const snapshot of snapshots) { - placeholders.push(`($${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++})`); + placeholders.push( + `($${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++})`, + ); values.push( snapshot.id, snapshot.timestamp, @@ -3029,7 +3183,9 @@ export class PostgresAdapter implements StoragePort { return result.rowCount ?? 0; } - async getVectorIndexSnapshots(options: VectorIndexSnapshotQueryOptions = {}): Promise { + async getVectorIndexSnapshots( + options: VectorIndexSnapshotQueryOptions = {}, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -3076,29 +3232,33 @@ export class PostgresAdapter implements StoragePort { })); } - async pruneOldVectorIndexSnapshots(cutoffTimestamp: number, connectionId?: string): Promise { + async pruneOldVectorIndexSnapshots( + cutoffTimestamp: number, + connectionId?: string, + ): Promise { if (!this.pool) throw new Error('Database not initialized'); if (connectionId) { const result = await this.pool.query( 'DELETE FROM vector_index_snapshots WHERE timestamp < $1 AND connection_id = $2', - [cutoffTimestamp, connectionId] + [cutoffTimestamp, connectionId], ); return result.rowCount ?? 0; } const result = await this.pool.query( 'DELETE FROM vector_index_snapshots WHERE timestamp < $1', - [cutoffTimestamp] + [cutoffTimestamp], ); return result.rowCount ?? 
0; } // Connection Management Methods - async saveConnection(config: import('../../common/interfaces/storage-port.interface').DatabaseConnectionConfig): Promise { + async saveConnection(config: DatabaseConnectionConfig): Promise { if (!this.pool) throw new Error('Database not initialized'); - await this.pool.query(` + await this.pool.query( + ` INSERT INTO connections (id, name, host, port, username, password, password_encrypted, db_index, tls, is_default, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) ON CONFLICT(id) DO UPDATE SET @@ -3112,28 +3272,30 @@ export class PostgresAdapter implements StoragePort { tls = EXCLUDED.tls, is_default = EXCLUDED.is_default, updated_at = EXCLUDED.updated_at - `, [ - config.id, - config.name, - config.host, - config.port, - config.username || null, - config.password || null, - config.passwordEncrypted || false, - config.dbIndex || 0, - config.tls || false, - config.isDefault || false, - config.createdAt, - config.updatedAt || null, - ]); + `, + [ + config.id, + config.name, + config.host, + config.port, + config.username || null, + config.password || null, + config.passwordEncrypted || false, + config.dbIndex || 0, + config.tls || false, + config.isDefault || false, + config.createdAt, + config.updatedAt || null, + ], + ); } - async getConnections(): Promise { + async getConnections(): Promise { if (!this.pool) throw new Error('Database not initialized'); const result = await this.pool.query('SELECT * FROM connections ORDER BY created_at ASC'); - return result.rows.map(row => ({ + return result.rows.map((row) => ({ id: row.id, name: row.name, host: row.host, @@ -3149,7 +3311,7 @@ export class PostgresAdapter implements StoragePort { })); } - async getConnection(id: string): Promise { + async getConnection(id: string): Promise { if (!this.pool) throw new Error('Database not initialized'); const result = await this.pool.query('SELECT * FROM connections WHERE id = $1', [id]); @@ -3177,7 +3339,7 @@ 
export class PostgresAdapter implements StoragePort { await this.pool.query('DELETE FROM connections WHERE id = $1', [id]); } - async updateConnection(id: string, updates: Partial): Promise { + async updateConnection(id: string, updates: Partial): Promise { if (!this.pool) throw new Error('Database not initialized'); const setClauses: string[] = []; @@ -3225,13 +3387,22 @@ export class PostgresAdapter implements StoragePort { await this.pool.query( `UPDATE connections SET ${setClauses.join(', ')} WHERE id = $${paramIndex}`, - params + params, ); } // Agent Token Methods - async saveAgentToken(token: { id: string; name: string; type: 'agent' | 'mcp'; tokenHash: string; createdAt: number; expiresAt: number; revokedAt: number | null; lastUsedAt: number | null }): Promise { + async saveAgentToken(token: { + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + }): Promise { if (!this.pool) throw new Error('Database not initialized'); await this.pool.query( `INSERT INTO agent_tokens (id, name, type, token_hash, created_at, expires_at, revoked_at, last_used_at) @@ -3243,11 +3414,31 @@ export class PostgresAdapter implements StoragePort { expires_at = EXCLUDED.expires_at, revoked_at = EXCLUDED.revoked_at, last_used_at = EXCLUDED.last_used_at`, - [token.id, token.name, token.type, token.tokenHash, token.createdAt, token.expiresAt, token.revokedAt, token.lastUsedAt] + [ + token.id, + token.name, + token.type, + token.tokenHash, + token.createdAt, + token.expiresAt, + token.revokedAt, + token.lastUsedAt, + ], ); } - async getAgentTokens(type?: 'agent' | 'mcp'): Promise> { + async getAgentTokens(type?: 'agent' | 'mcp'): Promise< + Array<{ + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + }> + > { if (!this.pool) throw new 
Error('Database not initialized'); const query = type ? `SELECT id, name, type, token_hash, created_at, expires_at, revoked_at, last_used_at @@ -3267,12 +3458,21 @@ export class PostgresAdapter implements StoragePort { })); } - async getAgentTokenByHash(hash: string): Promise<{ id: string; name: string; type: 'agent' | 'mcp'; tokenHash: string; createdAt: number; expiresAt: number; revokedAt: number | null; lastUsedAt: number | null } | null> { + async getAgentTokenByHash(hash: string): Promise<{ + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + } | null> { if (!this.pool) throw new Error('Database not initialized'); const result = await this.pool.query( `SELECT id, name, type, token_hash, created_at, expires_at, revoked_at, last_used_at FROM agent_tokens WHERE token_hash = $1`, - [hash] + [hash], ); if (result.rows.length === 0) return null; const row = result.rows[0]; @@ -3290,17 +3490,91 @@ export class PostgresAdapter implements StoragePort { async revokeAgentToken(id: string): Promise { if (!this.pool) throw new Error('Database not initialized'); - await this.pool.query( - `UPDATE agent_tokens SET revoked_at = $1 WHERE id = $2`, - [Date.now(), id] - ); + await this.pool.query(`UPDATE agent_tokens SET revoked_at = $1 WHERE id = $2`, [ + Date.now(), + id, + ]); } async updateAgentTokenLastUsed(id: string): Promise { if (!this.pool) throw new Error('Database not initialized'); + await this.pool.query(`UPDATE agent_tokens SET last_used_at = $1 WHERE id = $2`, [ + Date.now(), + id, + ]); + } + + // Throughput Forecasting Settings + async getThroughputSettings(connectionId: string): Promise { + if (!this.pool) { + throw new Error('Database not initialized'); + } + const result = await this.pool.query( + 'SELECT * FROM throughput_settings WHERE connection_id = $1', + [connectionId], + ); + if (result.rows.length === 0) { + return null; + } + 
const row = result.rows[0]; + return { + connectionId: row.connection_id, + enabled: row.enabled, + opsCeiling: row.ops_ceiling ?? null, + rollingWindowMs: row.rolling_window_ms, + alertThresholdMs: row.alert_threshold_ms, + updatedAt: Number(row.updated_at), + }; + } + + async saveThroughputSettings(settings: ThroughputSettings): Promise { + if (!this.pool) { + throw new Error('Database not initialized'); + } await this.pool.query( - `UPDATE agent_tokens SET last_used_at = $1 WHERE id = $2`, - [Date.now(), id] + ` + INSERT INTO throughput_settings (connection_id, enabled, ops_ceiling, rolling_window_ms, alert_threshold_ms, updated_at) + VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT(connection_id) DO UPDATE SET + enabled = EXCLUDED.enabled, + ops_ceiling = EXCLUDED.ops_ceiling, + rolling_window_ms = EXCLUDED.rolling_window_ms, + alert_threshold_ms = EXCLUDED.alert_threshold_ms, + updated_at = EXCLUDED.updated_at + `, + [ + settings.connectionId, + settings.enabled, + settings.opsCeiling, + settings.rollingWindowMs, + settings.alertThresholdMs, + settings.updatedAt, + ], + ); + return { ...settings }; + } + + async deleteThroughputSettings(connectionId: string): Promise { + if (!this.pool) throw new Error('Database not initialized'); + const result = await this.pool.query( + 'DELETE FROM throughput_settings WHERE connection_id = $1', + [connectionId], + ); + return (result.rowCount ?? 
0) > 0; + } + + async getActiveThroughputSettings(): Promise { + if (!this.pool) throw new Error('Database not initialized'); + const result = await this.pool.query( + 'SELECT * FROM throughput_settings WHERE enabled = true AND ops_ceiling IS NOT NULL', ); + return result.rows.map((row: any) => ({ + connectionId: row.connection_id, + enabled: row.enabled, + opsCeiling: row.ops_ceiling, + rollingWindowMs: row.rolling_window_ms, + alertThresholdMs: row.alert_threshold_ms, + updatedAt: Number(row.updated_at), + })); } } diff --git a/apps/api/src/storage/adapters/sqlite.adapter.ts b/apps/api/src/storage/adapters/sqlite.adapter.ts index 50b00780..91ce3b4b 100644 --- a/apps/api/src/storage/adapters/sqlite.adapter.ts +++ b/apps/api/src/storage/adapters/sqlite.adapter.ts @@ -25,7 +25,6 @@ import { Webhook, WebhookDelivery, WebhookEventType, - DeliveryStatus, StoredSlowLogEntry, SlowLogQueryOptions, StoredCommandLogEntry, @@ -35,6 +34,10 @@ import { LatencySnapshotQueryOptions, StoredMemorySnapshot, MemorySnapshotQueryOptions, + ThroughputSettings, + DatabaseConnectionConfig, + HotKeyEntry, + HotKeyQueryOptions, } from '../../common/interfaces/storage-port.interface'; import type { VectorIndexSnapshot, VectorIndexSnapshotQueryOptions } from '@betterdb/shared'; import { SqliteDialect, RowMappers } from './base-sql.adapter'; @@ -43,12 +46,21 @@ export interface SqliteAdapterConfig { filepath: string; } +type ThroughputSettingsRow = { + connection_id: string; + enabled: number; + ops_ceiling: number | null; + rolling_window_ms: number; + alert_threshold_ms: number; + updated_at: number; +} | null; + export class SqliteAdapter implements StoragePort { private db: Database.Database | null = null; private ready: boolean = false; private readonly mappers = new RowMappers(SqliteDialect); - constructor(private config: SqliteAdapterConfig) { } + constructor(private config: SqliteAdapterConfig) {} async initialize(): Promise { try { @@ -70,7 +82,9 @@ export class SqliteAdapter 
implements StoragePort { this.ready = true; } catch (error) { this.ready = false; - throw new Error(`Failed to initialize SQLite: ${error instanceof Error ? error.message : 'Unknown error'}`); + throw new Error( + `Failed to initialize SQLite: ${error instanceof Error ? error.message : 'Unknown error'}`, + ); } } @@ -81,8 +95,8 @@ export class SqliteAdapter implements StoragePort { if (!this.db) return; // Get existing columns in webhooks table - const tableInfo = this.db.prepare("PRAGMA table_info(webhooks)").all() as { name: string }[]; - const existingColumns = new Set(tableInfo.map(col => col.name)); + const tableInfo = this.db.prepare('PRAGMA table_info(webhooks)').all() as { name: string }[]; + const existingColumns = new Set(tableInfo.map((col) => col.name)); // Add new columns if they don't exist const newColumns = [ @@ -97,6 +111,28 @@ export class SqliteAdapter implements StoragePort { } } + // Add throughput forecasting columns to app_settings if they don't exist + const settingsInfo = this.db.prepare('PRAGMA table_info(app_settings)').all() as { + name: string; + }[]; + const settingsColumns = new Set(settingsInfo.map((col) => col.name)); + const throughputColumns = [ + { name: 'throughput_forecasting_enabled', type: 'INTEGER NOT NULL DEFAULT 1' }, + { + name: 'throughput_forecasting_default_rolling_window_ms', + type: 'INTEGER NOT NULL DEFAULT 21600000', + }, + { + name: 'throughput_forecasting_default_alert_threshold_ms', + type: 'INTEGER NOT NULL DEFAULT 7200000', + }, + ]; + for (const col of throughputColumns) { + if (!settingsColumns.has(col.name)) { + this.db.exec(`ALTER TABLE app_settings ADD COLUMN ${col.name} ${col.type}`); + } + } + // Migrate connection_id to all data tables for multi-database support this.migrateConnectionId(); } @@ -124,11 +160,13 @@ export class SqliteAdapter implements StoragePort { try { // Check if column exists const columns = this.db.prepare(`PRAGMA table_info(${table})`).all() as { name: string }[]; - if 
(!columns.some(c => c.name === 'connection_id')) { + if (!columns.some((c) => c.name === 'connection_id')) { // Add connection_id column with default value this.db.exec(`ALTER TABLE ${table} ADD COLUMN connection_id TEXT DEFAULT 'env-default'`); // Create index - this.db.exec(`CREATE INDEX IF NOT EXISTS idx_${table}_connection_id ON ${table}(connection_id)`); + this.db.exec( + `CREATE INDEX IF NOT EXISTS idx_${table}_connection_id ON ${table}(connection_id)`, + ); } } catch (error) { // Table might not exist yet - that's fine, createSchema will handle it @@ -252,7 +290,11 @@ export class SqliteAdapter implements StoragePort { return rows.map((row) => this.mappers.mapAclEntryRow(row)); } - async getAuditStats(startTime?: number, endTime?: number, connectionId?: string): Promise { + async getAuditStats( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise { if (!this.db) { throw new Error('Database not initialized'); } @@ -309,7 +351,9 @@ export class SqliteAdapter implements StoragePort { // Time range const timeRangeResult = this.db - .prepare(`SELECT MIN(captured_at) as earliest, MAX(captured_at) as latest FROM acl_audit ${whereClause}`) + .prepare( + `SELECT MIN(captured_at) as earliest, MAX(captured_at) as latest FROM acl_audit ${whereClause}`, + ) .get(...params) as { earliest: number | null; latest: number | null }; const timeRange = @@ -332,11 +376,15 @@ export class SqliteAdapter implements StoragePort { } if (connectionId) { - const result = this.db.prepare('DELETE FROM acl_audit WHERE captured_at < ? AND connection_id = ?').run(olderThanTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM acl_audit WHERE captured_at < ? 
AND connection_id = ?') + .run(olderThanTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM acl_audit WHERE captured_at < ?').run(olderThanTimestamp); + const result = this.db + .prepare('DELETE FROM acl_audit WHERE captured_at < ?') + .run(olderThanTimestamp); return result.changes; } @@ -385,7 +433,9 @@ export class SqliteAdapter implements StoragePort { return clients.length; } - async getClientSnapshots(options: ClientSnapshotQueryOptions = {}): Promise { + async getClientSnapshots( + options: ClientSnapshotQueryOptions = {}, + ): Promise { if (!this.db) { throw new Error('Database not initialized'); } @@ -445,7 +495,12 @@ export class SqliteAdapter implements StoragePort { return rows.map((row) => this.mappers.mapClientRow(row)); } - async getClientTimeSeries(startTime: number, endTime: number, bucketSizeMs: number = 60000, connectionId?: string): Promise { + async getClientTimeSeries( + startTime: number, + endTime: number, + bucketSizeMs: number = 60000, + connectionId?: string, + ): Promise { if (!this.db) { throw new Error('Database not initialized'); } @@ -508,7 +563,11 @@ export class SqliteAdapter implements StoragePort { return Array.from(pointsMap.values()).sort((a, b) => a.timestamp - b.timestamp); } - async getClientAnalyticsStats(startTime?: number, endTime?: number, connectionId?: string): Promise { + async getClientAnalyticsStats( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise { if (!this.db) { throw new Error('Database not initialized'); } @@ -540,10 +599,9 @@ export class SqliteAdapter implements StoragePort { const currentConditions = latestTimestamp.latest ? [...conditions, 'captured_at = ?'] : conditions; - const currentParams = latestTimestamp.latest - ? [...params, latestTimestamp.latest] - : params; - const currentWhereClause = currentConditions.length > 0 ? `WHERE ${currentConditions.join(' AND ')}` : ''; + const currentParams = latestTimestamp.latest ? 
[...params, latestTimestamp.latest] : params; + const currentWhereClause = + currentConditions.length > 0 ? `WHERE ${currentConditions.join(' AND ')}` : ''; const currentConnectionsResult = this.db .prepare(`SELECT COUNT(*) as count FROM client_snapshots ${currentWhereClause}`) @@ -556,7 +614,9 @@ export class SqliteAdapter implements StoragePort { ORDER BY count DESC LIMIT 1 `; - const peakResult = this.db.prepare(peakQuery).get(...params) as { captured_at: number; count: number } | undefined; + const peakResult = this.db.prepare(peakQuery).get(...params) as + | { captured_at: number; count: number } + | undefined; const uniqueNamesResult = this.db .prepare(`SELECT COUNT(DISTINCT name) as count FROM client_snapshots ${whereClause}`) @@ -567,35 +627,49 @@ export class SqliteAdapter implements StoragePort { .get(...params) as { count: number }; const uniqueIpsResult = this.db - .prepare(`SELECT COUNT(DISTINCT substr(addr, 1, instr(addr, ':') - 1)) as count FROM client_snapshots ${whereClause}`) + .prepare( + `SELECT COUNT(DISTINCT substr(addr, 1, instr(addr, ':') - 1)) as count FROM client_snapshots ${whereClause}`, + ) .get(...params) as { count: number }; - const byNameRows = this.db.prepare(` + const byNameRows = this.db + .prepare( + ` SELECT name, COUNT(*) as total, AVG(age) as avg_age FROM client_snapshots ${whereClause} GROUP BY name - `).all(...params) as Array<{ name: string; total: number; avg_age: number }>; + `, + ) + .all(...params) as Array<{ name: string; total: number; avg_age: number }>; const connectionsByName: Record = {}; for (const row of byNameRows) { if (row.name) { - const namePeakResult = this.db.prepare(` + const namePeakResult = this.db + .prepare( + ` SELECT captured_at, COUNT(*) as count FROM client_snapshots WHERE name = ? ${whereClause ? 
'AND ' + whereClause.substring(6) : ''} GROUP BY captured_at ORDER BY count DESC LIMIT 1 - `).get(row.name, ...params) as { count: number } | undefined; + `, + ) + .get(row.name, ...params) as { count: number } | undefined; - const nameCurrentResult = this.db.prepare(` + const nameCurrentResult = this.db + .prepare( + ` SELECT COUNT(*) as count FROM client_snapshots WHERE name = ? ${currentWhereClause ? 'AND ' + currentWhereClause.substring(6) : ''} - `).get(row.name, ...currentParams) as { count: number }; + `, + ) + .get(row.name, ...currentParams) as { count: number }; connectionsByName[row.name] = { current: nameCurrentResult.count, @@ -605,29 +679,41 @@ export class SqliteAdapter implements StoragePort { } } - const byUserRows = this.db.prepare(` + const byUserRows = this.db + .prepare( + ` SELECT user, COUNT(*) as total FROM client_snapshots ${whereClause} GROUP BY user - `).all(...params) as Array<{ user: string; total: number }>; + `, + ) + .all(...params) as Array<{ user: string; total: number }>; const connectionsByUser: Record = {}; for (const row of byUserRows) { if (row.user) { - const userPeakResult = this.db.prepare(` + const userPeakResult = this.db + .prepare( + ` SELECT captured_at, COUNT(*) as count FROM client_snapshots WHERE user = ? ${whereClause ? 'AND ' + whereClause.substring(6) : ''} GROUP BY captured_at ORDER BY count DESC LIMIT 1 - `).get(row.user, ...params) as { count: number } | undefined; + `, + ) + .get(row.user, ...params) as { count: number } | undefined; - const userCurrentResult = this.db.prepare(` + const userCurrentResult = this.db + .prepare( + ` SELECT COUNT(*) as count FROM client_snapshots WHERE user = ? ${currentWhereClause ? 
'AND ' + currentWhereClause.substring(6) : ''} - `).get(row.user, ...currentParams) as { count: number }; + `, + ) + .get(row.user, ...currentParams) as { count: number }; connectionsByUser[row.user] = { current: userCurrentResult.count, @@ -636,7 +722,9 @@ export class SqliteAdapter implements StoragePort { } } - const byUserAndNameRows = this.db.prepare(` + const byUserAndNameRows = this.db + .prepare( + ` SELECT user, name, @@ -644,26 +732,39 @@ export class SqliteAdapter implements StoragePort { AVG(age) as avg_age FROM client_snapshots ${whereClause} GROUP BY user, name - `).all(...params) as Array<{ user: string; name: string; total: number; avg_age: number }>; + `, + ) + .all(...params) as Array<{ user: string; name: string; total: number; avg_age: number }>; - const connectionsByUserAndName: Record = {}; + const connectionsByUserAndName: Record< + string, + { user: string; name: string; current: number; peak: number; avgAge: number } + > = {}; for (const row of byUserAndNameRows) { const key = `${row.user}:${row.name}`; - const combinedPeakResult = this.db.prepare(` + const combinedPeakResult = this.db + .prepare( + ` SELECT captured_at, COUNT(*) as count FROM client_snapshots WHERE user = ? AND name = ? ${whereClause ? 'AND ' + whereClause.substring(6) : ''} GROUP BY captured_at ORDER BY count DESC LIMIT 1 - `).get(row.user, row.name, ...params) as { count: number } | undefined; + `, + ) + .get(row.user, row.name, ...params) as { count: number } | undefined; - const combinedCurrentResult = this.db.prepare(` + const combinedCurrentResult = this.db + .prepare( + ` SELECT COUNT(*) as count FROM client_snapshots WHERE user = ? AND name = ? ${currentWhereClause ? 
'AND ' + currentWhereClause.substring(6) : ''} - `).get(row.user, row.name, ...currentParams) as { count: number }; + `, + ) + .get(row.user, row.name, ...currentParams) as { count: number }; connectionsByUserAndName[key] = { user: row.user, @@ -675,7 +776,9 @@ export class SqliteAdapter implements StoragePort { } const timeRangeResult = this.db - .prepare(`SELECT MIN(captured_at) as earliest, MAX(captured_at) as latest FROM client_snapshots ${whereClause}`) + .prepare( + `SELECT MIN(captured_at) as earliest, MAX(captured_at) as latest FROM client_snapshots ${whereClause}`, + ) .get(...params) as { earliest: number | null; latest: number | null }; const timeRange = @@ -753,17 +856,24 @@ export class SqliteAdapter implements StoragePort { return rows.map((row) => this.mappers.mapClientRow(row)); } - async pruneOldClientSnapshots(olderThanTimestamp: number, connectionId?: string): Promise { + async pruneOldClientSnapshots( + olderThanTimestamp: number, + connectionId?: string, + ): Promise { if (!this.db) { throw new Error('Database not initialized'); } if (connectionId) { - const result = this.db.prepare('DELETE FROM client_snapshots WHERE captured_at < ? AND connection_id = ?').run(olderThanTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM client_snapshots WHERE captured_at < ? 
AND connection_id = ?') + .run(olderThanTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM client_snapshots WHERE captured_at < ?').run(olderThanTimestamp); + const result = this.db + .prepare('DELETE FROM client_snapshots WHERE captured_at < ?') + .run(olderThanTimestamp); return result.changes; } @@ -937,10 +1047,22 @@ export class SqliteAdapter implements StoragePort { anomaly_poll_interval_ms INTEGER NOT NULL DEFAULT 1000, anomaly_cache_ttl_ms INTEGER NOT NULL DEFAULT 3600000, anomaly_prometheus_interval_ms INTEGER NOT NULL DEFAULT 30000, + throughput_forecasting_enabled INTEGER NOT NULL DEFAULT 1, + throughput_forecasting_default_rolling_window_ms INTEGER NOT NULL DEFAULT 21600000, + throughput_forecasting_default_alert_threshold_ms INTEGER NOT NULL DEFAULT 7200000, updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000), created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000) ); + CREATE TABLE IF NOT EXISTS throughput_settings ( + connection_id TEXT PRIMARY KEY, + enabled INTEGER NOT NULL DEFAULT 1, + ops_ceiling INTEGER, + rolling_window_ms INTEGER NOT NULL DEFAULT 21600000, + alert_threshold_ms INTEGER NOT NULL DEFAULT 7200000, + updated_at INTEGER NOT NULL + ); + CREATE TABLE IF NOT EXISTS webhooks ( id TEXT PRIMARY KEY, name TEXT NOT NULL, @@ -1083,9 +1205,16 @@ export class SqliteAdapter implements StoragePort { `); // Idempotent migration for existing deployments without ops/CPU columns - const addColumnIfMissing = (table: string, column: string, type: string, defaultVal: string) => { + const addColumnIfMissing = ( + table: string, + column: string, + type: string, + defaultVal: string, + ) => { try { - this.db!.exec(`ALTER TABLE ${table} ADD COLUMN ${column} ${type} NOT NULL DEFAULT ${defaultVal}`); + this.db!.exec( + `ALTER TABLE ${table} ADD COLUMN ${column} ${type} NOT NULL DEFAULT ${defaultVal}`, + ); } catch { // Column already exists — ignore } @@ -1232,7 +1361,11 @@ export class 
SqliteAdapter implements StoragePort { return rows.map((row) => this.mappers.mapAnomalyEventRow(row)); } - async getAnomalyStats(startTime?: number, endTime?: number, connectionId?: string): Promise { + async getAnomalyStats( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -1258,15 +1391,21 @@ export class SqliteAdapter implements StoragePort { .get(...params) as { count: number }; const severityResult = this.db - .prepare(`SELECT severity, COUNT(*) as count FROM anomaly_events ${whereClause} GROUP BY severity`) + .prepare( + `SELECT severity, COUNT(*) as count FROM anomaly_events ${whereClause} GROUP BY severity`, + ) .all(...params) as Array<{ severity: string; count: number }>; const metricResult = this.db - .prepare(`SELECT metric_type, COUNT(*) as count FROM anomaly_events ${whereClause} GROUP BY metric_type`) + .prepare( + `SELECT metric_type, COUNT(*) as count FROM anomaly_events ${whereClause} GROUP BY metric_type`, + ) .all(...params) as Array<{ metric_type: string; count: number }>; const unresolvedResult = this.db - .prepare(`SELECT COUNT(*) as count FROM anomaly_events ${whereClause ? whereClause + ' AND' : 'WHERE'} resolved = 0`) + .prepare( + `SELECT COUNT(*) as count FROM anomaly_events ${whereClause ? whereClause + ' AND' : 'WHERE'} resolved = 0`, + ) .get(...params) as { count: number }; const bySeverity: Record = {}; @@ -1305,11 +1444,15 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const result = this.db.prepare('DELETE FROM anomaly_events WHERE timestamp < ? AND connection_id = ?').run(cutoffTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM anomaly_events WHERE timestamp < ? 
AND connection_id = ?') + .run(cutoffTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM anomaly_events WHERE timestamp < ?').run(cutoffTimestamp); + const result = this.db + .prepare('DELETE FROM anomaly_events WHERE timestamp < ?') + .run(cutoffTimestamp); return result.changes; } @@ -1394,15 +1537,22 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const result = this.db.prepare('DELETE FROM correlated_anomaly_groups WHERE timestamp < ? AND connection_id = ?').run(cutoffTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM correlated_anomaly_groups WHERE timestamp < ? AND connection_id = ?') + .run(cutoffTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM correlated_anomaly_groups WHERE timestamp < ?').run(cutoffTimestamp); + const result = this.db + .prepare('DELETE FROM correlated_anomaly_groups WHERE timestamp < ?') + .run(cutoffTimestamp); return result.changes; } - async saveKeyPatternSnapshots(snapshots: KeyPatternSnapshot[], connectionId: string): Promise { + async saveKeyPatternSnapshots( + snapshots: KeyPatternSnapshot[], + connectionId: string, + ): Promise { if (!this.db || snapshots.length === 0) return 0; const stmt = this.db.prepare(` @@ -1445,7 +1595,9 @@ export class SqliteAdapter implements StoragePort { return snapshots.length; } - async getKeyPatternSnapshots(options: KeyPatternQueryOptions = {}): Promise { + async getKeyPatternSnapshots( + options: KeyPatternQueryOptions = {}, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -1486,7 +1638,11 @@ export class SqliteAdapter implements StoragePort { return rows.map((row) => this.mappers.mapKeyPatternSnapshotRow(row)); } - async getKeyAnalyticsSummary(startTime?: number, endTime?: number, connectionId?: string): Promise { + async 
getKeyAnalyticsSummary( + startTime?: number, + endTime?: number, + connectionId?: string, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -1527,7 +1683,9 @@ export class SqliteAdapter implements StoragePort { } // Build aggregation query for latest snapshots only - const patternConditions = latestSnapshots.map(() => '(pattern = ? AND timestamp = ?)').join(' OR '); + const patternConditions = latestSnapshots + .map(() => '(pattern = ? AND timestamp = ?)') + .join(' OR '); const patternParams: any[] = []; for (const snapshot of latestSnapshots) { patternParams.push(snapshot.pattern, snapshot.latest_timestamp); @@ -1578,7 +1736,9 @@ export class SqliteAdapter implements StoragePort { // Get time range const timeRangeResult = this.db - .prepare(`SELECT MIN(timestamp) as earliest, MAX(timestamp) as latest FROM key_pattern_snapshots ${whereClause}`) + .prepare( + `SELECT MIN(timestamp) as earliest, MAX(timestamp) as latest FROM key_pattern_snapshots ${whereClause}`, + ) .get(...params) as { earliest: number | null; latest: number | null }; const timeRange = @@ -1599,12 +1759,19 @@ export class SqliteAdapter implements StoragePort { }; } - async getKeyPatternTrends(pattern: string, startTime: number, endTime: number, connectionId?: string): Promise> { + async getKeyPatternTrends( + pattern: string, + startTime: number, + endTime: number, + connectionId?: string, + ): Promise< + Array<{ + timestamp: number; + keyCount: number; + memoryBytes: number; + staleCount: number; + }> + > { if (!this.db) throw new Error('Database not initialized'); const conditions = ['pattern = ?', 'timestamp >= ?', 'timestamp <= ?']; @@ -1628,7 +1795,7 @@ export class SqliteAdapter implements StoragePort { const rows = this.db.prepare(query).all(...params) as any[]; - return rows.map(row => ({ + return rows.map((row) => ({ timestamp: row.timestamp, keyCount: row.key_count, memoryBytes: row.total_memory_bytes, @@ -1636,19 +1803,26 @@ 
export class SqliteAdapter implements StoragePort { })); } - async pruneOldKeyPatternSnapshots(cutoffTimestamp: number, connectionId?: string): Promise { + async pruneOldKeyPatternSnapshots( + cutoffTimestamp: number, + connectionId?: string, + ): Promise { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const result = this.db.prepare('DELETE FROM key_pattern_snapshots WHERE timestamp < ? AND connection_id = ?').run(cutoffTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM key_pattern_snapshots WHERE timestamp < ? AND connection_id = ?') + .run(cutoffTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM key_pattern_snapshots WHERE timestamp < ?').run(cutoffTimestamp); + const result = this.db + .prepare('DELETE FROM key_pattern_snapshots WHERE timestamp < ?') + .run(cutoffTimestamp); return result.changes; } - async saveHotKeys(entries: import('../../common/interfaces/storage-port.interface').HotKeyEntry[], connectionId: string): Promise { + async saveHotKeys(entries: HotKeyEntry[], connectionId: string): Promise { if (!this.db || entries.length === 0) return 0; const stmt = this.db.prepare(` @@ -1658,7 +1832,7 @@ export class SqliteAdapter implements StoragePort { ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
`); - const insertMany = this.db.transaction((entries: import('../../common/interfaces/storage-port.interface').HotKeyEntry[], connId: string) => { + const insertMany = this.db.transaction((entries: HotKeyEntry[], connId: string) => { for (const entry of entries) { stmt.run( entry.id, @@ -1679,7 +1853,7 @@ export class SqliteAdapter implements StoragePort { return entries.length; } - async getHotKeys(options: import('../../common/interfaces/storage-port.interface').HotKeyQueryOptions = {}): Promise { + async getHotKeys(options: HotKeyQueryOptions = {}): Promise { if (!this.db) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -1723,14 +1897,18 @@ export class SqliteAdapter implements StoragePort { const offset = options.offset ?? 0; params.push(limit, offset); - const rows = this.db.prepare(` + const rows = this.db + .prepare( + ` SELECT id, key_name, connection_id, captured_at, signal_type, freq_score, idle_seconds, memory_bytes, ttl, rank FROM hot_key_stats ${whereClause} ORDER BY captured_at DESC, rank ASC LIMIT ? OFFSET ? - `).all(...params) as any[]; + `, + ) + .all(...params) as any[]; return rows.map((row: any) => ({ id: row.id, @@ -1750,11 +1928,15 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const result = this.db.prepare('DELETE FROM hot_key_stats WHERE captured_at < ? AND connection_id = ?').run(cutoffTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM hot_key_stats WHERE captured_at < ? 
AND connection_id = ?') + .run(cutoffTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM hot_key_stats WHERE captured_at < ?').run(cutoffTimestamp); + const result = this.db + .prepare('DELETE FROM hot_key_stats WHERE captured_at < ?') + .run(cutoffTimestamp); return result.changes; } @@ -1778,14 +1960,18 @@ export class SqliteAdapter implements StoragePort { INSERT INTO app_settings ( id, audit_poll_interval_ms, client_analytics_poll_interval_ms, anomaly_poll_interval_ms, anomaly_cache_ttl_ms, anomaly_prometheus_interval_ms, + throughput_forecasting_enabled, throughput_forecasting_default_rolling_window_ms, throughput_forecasting_default_alert_threshold_ms, updated_at, created_at - ) VALUES (1, ?, ?, ?, ?, ?, ?, ?) + ) VALUES (1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) ON CONFLICT(id) DO UPDATE SET audit_poll_interval_ms = excluded.audit_poll_interval_ms, client_analytics_poll_interval_ms = excluded.client_analytics_poll_interval_ms, anomaly_poll_interval_ms = excluded.anomaly_poll_interval_ms, anomaly_cache_ttl_ms = excluded.anomaly_cache_ttl_ms, anomaly_prometheus_interval_ms = excluded.anomaly_prometheus_interval_ms, + throughput_forecasting_enabled = excluded.throughput_forecasting_enabled, + throughput_forecasting_default_rolling_window_ms = excluded.throughput_forecasting_default_rolling_window_ms, + throughput_forecasting_default_alert_threshold_ms = excluded.throughput_forecasting_default_alert_threshold_ms, updated_at = excluded.updated_at `); @@ -1795,8 +1981,11 @@ export class SqliteAdapter implements StoragePort { settings.anomalyPollIntervalMs, settings.anomalyCacheTtlMs, settings.anomalyPrometheusIntervalMs, + settings.throughputForecastingEnabled ? 
1 : 0, + settings.throughputForecastingDefaultRollingWindowMs, + settings.throughputForecastingDefaultAlertThresholdMs, now, - settings.createdAt || now + settings.createdAt || now, ); const saved = await this.getSettings(); @@ -1847,7 +2036,7 @@ export class SqliteAdapter implements StoragePort { webhook.thresholds ? JSON.stringify(webhook.thresholds) : null, webhook.connectionId || null, now, - now + now, ); return { @@ -1881,12 +2070,18 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const rows = this.db.prepare('SELECT * FROM webhooks WHERE connection_id = ? OR connection_id IS NULL ORDER BY created_at DESC').all(connectionId) as any[]; + const rows = this.db + .prepare( + 'SELECT * FROM webhooks WHERE connection_id = ? OR connection_id IS NULL ORDER BY created_at DESC', + ) + .all(connectionId) as any[]; return rows.map((row) => this.mappers.mapWebhookRow(row)); } // No connectionId provided - only return global webhooks (not scoped to any connection) - const rows = this.db.prepare('SELECT * FROM webhooks WHERE connection_id IS NULL ORDER BY created_at DESC').all() as any[]; + const rows = this.db + .prepare('SELECT * FROM webhooks WHERE connection_id IS NULL ORDER BY created_at DESC') + .all() as any[]; return rows.map((row) => this.mappers.mapWebhookRow(row)); } @@ -1895,20 +2090,29 @@ export class SqliteAdapter implements StoragePort { if (connectionId) { // Return webhooks scoped to this connection OR global webhooks (no connectionId) - const rows = this.db.prepare('SELECT * FROM webhooks WHERE enabled = 1 AND (connection_id = ? OR connection_id IS NULL)').all(connectionId) as any[]; + const rows = this.db + .prepare( + 'SELECT * FROM webhooks WHERE enabled = 1 AND (connection_id = ? 
OR connection_id IS NULL)', + ) + .all(connectionId) as any[]; return rows .map((row) => this.mappers.mapWebhookRow(row)) .filter((webhook) => webhook.events.includes(event)); } // No connectionId provided - only return global webhooks (not scoped to any connection) - const rows = this.db.prepare('SELECT * FROM webhooks WHERE enabled = 1 AND connection_id IS NULL').all() as any[]; + const rows = this.db + .prepare('SELECT * FROM webhooks WHERE enabled = 1 AND connection_id IS NULL') + .all() as any[]; return rows .map((row) => this.mappers.mapWebhookRow(row)) .filter((webhook) => webhook.events.includes(event)); } - async updateWebhook(id: string, updates: Partial>): Promise { + async updateWebhook( + id: string, + updates: Partial>, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const setClauses: string[] = []; @@ -1981,7 +2185,9 @@ export class SqliteAdapter implements StoragePort { return result.changes > 0; } - async createDelivery(delivery: Omit): Promise { + async createDelivery( + delivery: Omit, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const id = randomUUID(); @@ -2007,7 +2213,7 @@ export class SqliteAdapter implements StoragePort { delivery.completedAt || null, delivery.durationMs || null, delivery.connectionId || null, - now + now, ); return { @@ -2036,15 +2242,26 @@ export class SqliteAdapter implements StoragePort { return this.mappers.mapDeliveryRow(row); } - async getDeliveriesByWebhook(webhookId: string, limit: number = 50, offset: number = 0): Promise { + async getDeliveriesByWebhook( + webhookId: string, + limit: number = 50, + offset: number = 0, + ): Promise { if (!this.db) throw new Error('Database not initialized'); - const rows = this.db.prepare('SELECT * FROM webhook_deliveries WHERE webhook_id = ? ORDER BY created_at DESC LIMIT ? OFFSET ?').all(webhookId, limit, offset) as any[]; + const rows = this.db + .prepare( + 'SELECT * FROM webhook_deliveries WHERE webhook_id = ? 
ORDER BY created_at DESC LIMIT ? OFFSET ?', + ) + .all(webhookId, limit, offset) as any[]; return rows.map((row) => this.mappers.mapDeliveryRow(row)); } - async updateDelivery(id: string, updates: Partial>): Promise { + async updateDelivery( + id: string, + updates: Partial>, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const setClauses: string[] = []; @@ -2083,33 +2300,42 @@ export class SqliteAdapter implements StoragePort { params.push(id); - const stmt = this.db.prepare(`UPDATE webhook_deliveries SET ${setClauses.join(', ')} WHERE id = ?`); + const stmt = this.db.prepare( + `UPDATE webhook_deliveries SET ${setClauses.join(', ')} WHERE id = ?`, + ); const result = stmt.run(...params); return result.changes > 0; } - async getRetriableDeliveries(limit: number = 100, connectionId?: string): Promise { + async getRetriableDeliveries( + limit: number = 100, + connectionId?: string, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const now = Date.now(); if (connectionId) { - const rows = this.db.prepare( - `SELECT * FROM webhook_deliveries + const rows = this.db + .prepare( + `SELECT * FROM webhook_deliveries WHERE status = 'retrying' AND next_retry_at <= ? AND connection_id = ? ORDER BY next_retry_at ASC - LIMIT ?` - ).all(now, connectionId, limit) as any[]; + LIMIT ?`, + ) + .all(now, connectionId, limit) as any[]; return rows.map((row) => this.mappers.mapDeliveryRow(row)); } - const rows = this.db.prepare( - `SELECT * FROM webhook_deliveries + const rows = this.db + .prepare( + `SELECT * FROM webhook_deliveries WHERE status = 'retrying' AND next_retry_at <= ? 
ORDER BY next_retry_at ASC - LIMIT ?` - ).all(now, limit) as any[]; + LIMIT ?`, + ) + .all(now, limit) as any[]; return rows.map((row) => this.mappers.mapDeliveryRow(row)); } @@ -2118,11 +2344,15 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const result = this.db.prepare('DELETE FROM webhook_deliveries WHERE created_at < ? AND connection_id = ?').run(cutoffTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM webhook_deliveries WHERE created_at < ? AND connection_id = ?') + .run(cutoffTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM webhook_deliveries WHERE created_at < ?').run(cutoffTimestamp); + const result = this.db + .prepare('DELETE FROM webhook_deliveries WHERE created_at < ?') + .run(cutoffTimestamp); return result.changes; } @@ -2144,7 +2374,7 @@ export class SqliteAdapter implements StoragePort { entry.id, entry.timestamp, entry.duration, - JSON.stringify(entry.command), // Store as JSON string + JSON.stringify(entry.command), // Store as JSON string entry.clientAddress || '', entry.clientName || '', entry.capturedAt, @@ -2195,14 +2425,16 @@ export class SqliteAdapter implements StoragePort { const limit = options.limit ?? 100; const offset = options.offset ?? 0; - const rows = this.db.prepare( - `SELECT slowlog_id, timestamp, duration, command, + const rows = this.db + .prepare( + `SELECT slowlog_id, timestamp, duration, command, client_address, client_name, captured_at, source_host, source_port, connection_id FROM slow_log_entries ${whereClause} ORDER BY timestamp DESC - LIMIT ? OFFSET ?` - ).all(...params, limit, offset) as any[]; + LIMIT ? 
OFFSET ?`, + ) + .all(...params, limit, offset) as any[]; return rows.map((row) => this.mappers.mapSlowLogEntryRow(row)); } @@ -2211,11 +2443,15 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const row = this.db.prepare('SELECT MAX(slowlog_id) as max_id FROM slow_log_entries WHERE connection_id = ?').get(connectionId) as any; + const row = this.db + .prepare('SELECT MAX(slowlog_id) as max_id FROM slow_log_entries WHERE connection_id = ?') + .get(connectionId) as any; return row?.max_id ?? null; } - const row = this.db.prepare('SELECT MAX(slowlog_id) as max_id FROM slow_log_entries').get() as any; + const row = this.db + .prepare('SELECT MAX(slowlog_id) as max_id FROM slow_log_entries') + .get() as any; return row?.max_id ?? null; } @@ -2223,16 +2459,23 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const result = this.db.prepare('DELETE FROM slow_log_entries WHERE captured_at < ? AND connection_id = ?').run(cutoffTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM slow_log_entries WHERE captured_at < ? 
AND connection_id = ?') + .run(cutoffTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM slow_log_entries WHERE captured_at < ?').run(cutoffTimestamp); + const result = this.db + .prepare('DELETE FROM slow_log_entries WHERE captured_at < ?') + .run(cutoffTimestamp); return result.changes; } // Command Log Methods - async saveCommandLogEntries(entries: StoredCommandLogEntry[], connectionId: string): Promise { + async saveCommandLogEntries( + entries: StoredCommandLogEntry[], + connectionId: string, + ): Promise { if (!this.db || entries.length === 0) return 0; const stmt = this.db.prepare(` @@ -2266,7 +2509,9 @@ export class SqliteAdapter implements StoragePort { return count; } - async getCommandLogEntries(options: CommandLogQueryOptions = {}): Promise { + async getCommandLogEntries( + options: CommandLogQueryOptions = {}, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -2305,14 +2550,16 @@ export class SqliteAdapter implements StoragePort { const limit = options.limit ?? 100; const offset = options.offset ?? 0; - const rows = this.db.prepare( - `SELECT commandlog_id, timestamp, duration, command, + const rows = this.db + .prepare( + `SELECT commandlog_id, timestamp, duration, command, client_address, client_name, log_type, captured_at, source_host, source_port, connection_id FROM command_log_entries ${whereClause} ORDER BY timestamp DESC - LIMIT ? OFFSET ?` - ).all(...params, limit, offset) as any[]; + LIMIT ? OFFSET ?`, + ) + .all(...params, limit, offset) as any[]; return rows.map((row) => this.mappers.mapCommandLogEntryRow(row)); } @@ -2321,15 +2568,17 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const row = this.db.prepare( - 'SELECT MAX(commandlog_id) as max_id FROM command_log_entries WHERE log_type = ? AND connection_id = ?' 
- ).get(type, connectionId) as any; + const row = this.db + .prepare( + 'SELECT MAX(commandlog_id) as max_id FROM command_log_entries WHERE log_type = ? AND connection_id = ?', + ) + .get(type, connectionId) as any; return row?.max_id ?? null; } - const row = this.db.prepare( - 'SELECT MAX(commandlog_id) as max_id FROM command_log_entries WHERE log_type = ?' - ).get(type) as any; + const row = this.db + .prepare('SELECT MAX(commandlog_id) as max_id FROM command_log_entries WHERE log_type = ?') + .get(type) as any; return row?.max_id ?? null; } @@ -2337,18 +2586,23 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const result = this.db.prepare( - 'DELETE FROM command_log_entries WHERE captured_at < ? AND connection_id = ?' - ).run(cutoffTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM command_log_entries WHERE captured_at < ? AND connection_id = ?') + .run(cutoffTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM command_log_entries WHERE captured_at < ?').run(cutoffTimestamp); + const result = this.db + .prepare('DELETE FROM command_log_entries WHERE captured_at < ?') + .run(cutoffTimestamp); return result.changes; } // Latency Snapshot Methods - async saveLatencySnapshots(snapshots: StoredLatencySnapshot[], connectionId: string): Promise { + async saveLatencySnapshots( + snapshots: StoredLatencySnapshot[], + connectionId: string, + ): Promise { if (!this.db || snapshots.length === 0) return 0; const stmt = this.db.prepare(` @@ -2375,7 +2629,9 @@ export class SqliteAdapter implements StoragePort { return count; } - async getLatencySnapshots(options: LatencySnapshotQueryOptions = {}): Promise { + async getLatencySnapshots( + options: LatencySnapshotQueryOptions = {}, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -2408,7 +2664,7 @@ export class 
SqliteAdapter implements StoragePort { params.push(limit, offset); const rows = this.db.prepare(query).all(...params) as any[]; - return rows.map(row => ({ + return rows.map((row) => ({ id: row.id, timestamp: row.timestamp, eventName: row.event_name, @@ -2422,26 +2678,37 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const result = this.db.prepare('DELETE FROM latency_snapshots WHERE timestamp < ? AND connection_id = ?').run(cutoffTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM latency_snapshots WHERE timestamp < ? AND connection_id = ?') + .run(cutoffTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM latency_snapshots WHERE timestamp < ?').run(cutoffTimestamp); + const result = this.db + .prepare('DELETE FROM latency_snapshots WHERE timestamp < ?') + .run(cutoffTimestamp); return result.changes; } // Latency Histogram Methods - async saveLatencyHistogram(histogram: import('../../common/interfaces/storage-port.interface').StoredLatencyHistogram, connectionId: string): Promise { + async saveLatencyHistogram( + histogram: StoredLatencyHistogram, + connectionId: string, + ): Promise { if (!this.db) return 0; - const result = this.db.prepare( - `INSERT INTO latency_histograms (id, timestamp, histogram_data, connection_id) - VALUES (?, ?, ?, ?)` - ).run(histogram.id, histogram.timestamp, JSON.stringify(histogram.data), connectionId); + const result = this.db + .prepare( + `INSERT INTO latency_histograms (id, timestamp, histogram_data, connection_id) + VALUES (?, ?, ?, ?)`, + ) + .run(histogram.id, histogram.timestamp, JSON.stringify(histogram.data), connectionId); return result.changes; } - async getLatencyHistograms(options: { connectionId?: string; startTime?: number; endTime?: number; limit?: number } = {}): Promise { + async getLatencyHistograms( + options: { connectionId?: string; startTime?: number; 
endTime?: number; limit?: number } = {}, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -2473,7 +2740,7 @@ export class SqliteAdapter implements StoragePort { params.push(limit); const rows = this.db.prepare(query).all(...params) as any[]; - return rows.map(row => ({ + return rows.map((row) => ({ id: row.id, timestamp: row.timestamp, data: JSON.parse(row.histogram_data), @@ -2485,16 +2752,23 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const result = this.db.prepare('DELETE FROM latency_histograms WHERE timestamp < ? AND connection_id = ?').run(cutoffTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM latency_histograms WHERE timestamp < ? AND connection_id = ?') + .run(cutoffTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM latency_histograms WHERE timestamp < ?').run(cutoffTimestamp); + const result = this.db + .prepare('DELETE FROM latency_histograms WHERE timestamp < ?') + .run(cutoffTimestamp); return result.changes; } // Memory Snapshot Methods - async saveMemorySnapshots(snapshots: StoredMemorySnapshot[], connectionId: string): Promise { + async saveMemorySnapshots( + snapshots: StoredMemorySnapshot[], + connectionId: string, + ): Promise { if (!this.db || snapshots.length === 0) return 0; const stmt = this.db.prepare(` @@ -2532,7 +2806,9 @@ export class SqliteAdapter implements StoragePort { return count; } - async getMemorySnapshots(options: MemorySnapshotQueryOptions = {}): Promise { + async getMemorySnapshots( + options: MemorySnapshotQueryOptions = {}, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -2567,7 +2843,7 @@ export class SqliteAdapter implements StoragePort { params.push(limit, offset); const rows = this.db.prepare(query).all(...params) as any[]; - return 
rows.map(row => ({ + return rows.map((row) => ({ id: row.id, timestamp: row.timestamp, usedMemory: row.used_memory, @@ -2589,16 +2865,23 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const result = this.db.prepare('DELETE FROM memory_snapshots WHERE timestamp < ? AND connection_id = ?').run(cutoffTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM memory_snapshots WHERE timestamp < ? AND connection_id = ?') + .run(cutoffTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM memory_snapshots WHERE timestamp < ?').run(cutoffTimestamp); + const result = this.db + .prepare('DELETE FROM memory_snapshots WHERE timestamp < ?') + .run(cutoffTimestamp); return result.changes; } // Vector Index Snapshot Methods - async saveVectorIndexSnapshots(snapshots: VectorIndexSnapshot[], connectionId: string): Promise { + async saveVectorIndexSnapshots( + snapshots: VectorIndexSnapshot[], + connectionId: string, + ): Promise { if (!this.db || snapshots.length === 0) return 0; const stmt = this.db.prepare(` @@ -2625,7 +2908,9 @@ export class SqliteAdapter implements StoragePort { return count; } - async getVectorIndexSnapshots(options: VectorIndexSnapshotQueryOptions = {}): Promise { + async getVectorIndexSnapshots( + options: VectorIndexSnapshotQueryOptions = {}, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const conditions: string[] = []; @@ -2661,7 +2946,7 @@ export class SqliteAdapter implements StoragePort { params.push(limit); const rows = this.db.prepare(query).all(...params) as any[]; - return rows.map(row => ({ + return rows.map((row) => ({ id: row.id, timestamp: row.timestamp, connectionId: row.connection_id, @@ -2671,20 +2956,27 @@ export class SqliteAdapter implements StoragePort { })); } - async pruneOldVectorIndexSnapshots(cutoffTimestamp: number, connectionId?: string): Promise { + async 
pruneOldVectorIndexSnapshots( + cutoffTimestamp: number, + connectionId?: string, + ): Promise { if (!this.db) throw new Error('Database not initialized'); if (connectionId) { - const result = this.db.prepare('DELETE FROM vector_index_snapshots WHERE timestamp < ? AND connection_id = ?').run(cutoffTimestamp, connectionId); + const result = this.db + .prepare('DELETE FROM vector_index_snapshots WHERE timestamp < ? AND connection_id = ?') + .run(cutoffTimestamp, connectionId); return result.changes; } - const result = this.db.prepare('DELETE FROM vector_index_snapshots WHERE timestamp < ?').run(cutoffTimestamp); + const result = this.db + .prepare('DELETE FROM vector_index_snapshots WHERE timestamp < ?') + .run(cutoffTimestamp); return result.changes; } // Connection Management Methods - async saveConnection(config: import('../../common/interfaces/storage-port.interface').DatabaseConnectionConfig): Promise { + async saveConnection(config: DatabaseConnectionConfig): Promise { if (!this.db) throw new Error('Database not initialized'); // Ensure connections table exists @@ -2706,8 +2998,8 @@ export class SqliteAdapter implements StoragePort { `); // Migration: add password_encrypted column if it doesn't exist - const columns = this.db.prepare("PRAGMA table_info(connections)").all() as { name: string }[]; - if (!columns.some(c => c.name === 'password_encrypted')) { + const columns = this.db.prepare('PRAGMA table_info(connections)').all() as { name: string }[]; + if (!columns.some((c) => c.name === 'password_encrypted')) { this.db.exec('ALTER TABLE connections ADD COLUMN password_encrypted INTEGER DEFAULT 0'); } @@ -2726,8 +3018,8 @@ export class SqliteAdapter implements StoragePort { `); // Migration: add type column to existing agent_tokens tables - const atCols = this.db.prepare("PRAGMA table_info(agent_tokens)").all() as { name: string }[]; - if (!atCols.some(c => c.name === 'type')) { + const atCols = this.db.prepare('PRAGMA table_info(agent_tokens)').all() as { 
name: string }[]; + if (!atCols.some((c) => c.name === 'type')) { this.db.exec("ALTER TABLE agent_tokens ADD COLUMN type TEXT NOT NULL DEFAULT 'agent'"); } @@ -2763,16 +3055,20 @@ export class SqliteAdapter implements StoragePort { ); } - async getConnections(): Promise { + async getConnections(): Promise { if (!this.db) throw new Error('Database not initialized'); // Return empty array if table doesn't exist - const tableExists = this.db.prepare("SELECT name FROM sqlite_master WHERE type='table' AND name='connections'").get(); + const tableExists = this.db + .prepare("SELECT name FROM sqlite_master WHERE type='table' AND name='connections'") + .get(); if (!tableExists) return []; - const rows = this.db.prepare('SELECT * FROM connections ORDER BY created_at ASC').all() as any[]; + const rows = this.db + .prepare('SELECT * FROM connections ORDER BY created_at ASC') + .all() as any[]; - return rows.map(row => ({ + return rows.map((row) => ({ id: row.id, name: row.name, host: row.host, @@ -2788,10 +3084,12 @@ export class SqliteAdapter implements StoragePort { })); } - async getConnection(id: string): Promise { + async getConnection(id: string): Promise { if (!this.db) throw new Error('Database not initialized'); - const tableExists = this.db.prepare("SELECT name FROM sqlite_master WHERE type='table' AND name='connections'").get(); + const tableExists = this.db + .prepare("SELECT name FROM sqlite_master WHERE type='table' AND name='connections'") + .get(); if (!tableExists) return null; const row = this.db.prepare('SELECT * FROM connections WHERE id = ?').get(id) as any; @@ -2819,7 +3117,7 @@ export class SqliteAdapter implements StoragePort { this.db.prepare('DELETE FROM connections WHERE id = ?').run(id); } - async updateConnection(id: string, updates: Partial): Promise { + async updateConnection(id: string, updates: Partial): Promise { if (!this.db) throw new Error('Database not initialized'); const setClauses: string[] = []; @@ -2869,15 +3167,46 @@ export class 
SqliteAdapter implements StoragePort { // Agent Token Methods - async saveAgentToken(token: { id: string; name: string; type: 'agent' | 'mcp'; tokenHash: string; createdAt: number; expiresAt: number; revokedAt: number | null; lastUsedAt: number | null }): Promise { + async saveAgentToken(token: { + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + }): Promise { if (!this.db) throw new Error('Database not initialized'); - this.db.prepare( - `INSERT OR REPLACE INTO agent_tokens (id, name, type, token_hash, created_at, expires_at, revoked_at, last_used_at) - VALUES (?, ?, ?, ?, ?, ?, ?, ?)` - ).run(token.id, token.name, token.type, token.tokenHash, token.createdAt, token.expiresAt, token.revokedAt, token.lastUsedAt); + this.db + .prepare( + `INSERT OR REPLACE INTO agent_tokens (id, name, type, token_hash, created_at, expires_at, revoked_at, last_used_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, + ) + .run( + token.id, + token.name, + token.type, + token.tokenHash, + token.createdAt, + token.expiresAt, + token.revokedAt, + token.lastUsedAt, + ); } - async getAgentTokens(type?: 'agent' | 'mcp'): Promise> { + async getAgentTokens(type?: 'agent' | 'mcp'): Promise< + Array<{ + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + }> + > { if (!this.db) throw new Error('Database not initialized'); const query = type ? 'SELECT * FROM agent_tokens WHERE type = ? 
ORDER BY created_at DESC' @@ -2895,7 +3224,16 @@ export class SqliteAdapter implements StoragePort { })); } - async getAgentTokenByHash(hash: string): Promise<{ id: string; name: string; type: 'agent' | 'mcp'; tokenHash: string; createdAt: number; expiresAt: number; revokedAt: number | null; lastUsedAt: number | null } | null> { + async getAgentTokenByHash(hash: string): Promise<{ + id: string; + name: string; + type: 'agent' | 'mcp'; + tokenHash: string; + createdAt: number; + expiresAt: number; + revokedAt: number | null; + lastUsedAt: number | null; + } | null> { if (!this.db) throw new Error('Database not initialized'); const row = this.db.prepare('SELECT * FROM agent_tokens WHERE token_hash = ?').get(hash) as any; if (!row) return null; @@ -2920,4 +3258,78 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); this.db.prepare('UPDATE agent_tokens SET last_used_at = ? WHERE id = ?').run(Date.now(), id); } + + // Throughput Forecasting Settings + async getThroughputSettings(connectionId: string): Promise { + if (!this.db) { + throw new Error('Database not initialized'); + } + const row = this.db + .prepare('SELECT * FROM throughput_settings WHERE connection_id = ?') + .get(connectionId) as unknown as ThroughputSettingsRow; + + if (!row) { + return null; + } + return { + connectionId: row.connection_id, + enabled: !!row.enabled, + opsCeiling: row.ops_ceiling ?? null, + rollingWindowMs: row.rolling_window_ms, + alertThresholdMs: row.alert_threshold_ms, + updatedAt: row.updated_at, + }; + } + + async saveThroughputSettings(settings: ThroughputSettings): Promise { + if (!this.db) throw new Error('Database not initialized'); + this.db + .prepare( + ` + INSERT INTO throughput_settings (connection_id, enabled, ops_ceiling, rolling_window_ms, alert_threshold_ms, updated_at) + VALUES (?, ?, ?, ?, ?, ?) 
+ ON CONFLICT(connection_id) DO UPDATE SET + enabled = excluded.enabled, + ops_ceiling = excluded.ops_ceiling, + rolling_window_ms = excluded.rolling_window_ms, + alert_threshold_ms = excluded.alert_threshold_ms, + updated_at = excluded.updated_at + `, + ) + .run( + settings.connectionId, + settings.enabled ? 1 : 0, + settings.opsCeiling, + settings.rollingWindowMs, + settings.alertThresholdMs, + settings.updatedAt, + ); + return { ...settings }; + } + + async deleteThroughputSettings(connectionId: string): Promise { + if (!this.db) throw new Error('Database not initialized'); + const result = this.db + .prepare('DELETE FROM throughput_settings WHERE connection_id = ?') + .run(connectionId); + return result.changes > 0; + } + + async getActiveThroughputSettings(): Promise { + if (!this.db) throw new Error('Database not initialized'); + const rows = this.db + .prepare('SELECT * FROM throughput_settings WHERE enabled = 1 AND ops_ceiling IS NOT NULL') + .all() as ThroughputSettingsRow[]; + if (!rows || rows.length === 0) { + return []; + } + return rows.map((row) => ({ + connectionId: row!.connection_id, + enabled: !!row!.enabled, + opsCeiling: row!.ops_ceiling, + rollingWindowMs: row!.rolling_window_ms, + alertThresholdMs: row!.alert_threshold_ms, + updatedAt: row!.updated_at, + })); + } } diff --git a/apps/api/src/throughput-forecasting/__tests__/throughput-forecasting.service.spec.ts b/apps/api/src/throughput-forecasting/__tests__/throughput-forecasting.service.spec.ts new file mode 100644 index 00000000..7a82ae8e --- /dev/null +++ b/apps/api/src/throughput-forecasting/__tests__/throughput-forecasting.service.spec.ts @@ -0,0 +1,554 @@ +import { Test, TestingModule } from '@nestjs/testing'; +import { MemoryAdapter } from '../../storage/adapters/memory.adapter'; +import { ThroughputForecastingService } from '../throughput-forecasting.service'; +import { SettingsService } from '../../settings/settings.service'; +import { ConnectionRegistry } from 
'../../connections/connection-registry.service';
+import type { AppSettings } from '@betterdb/shared';
+import type { ThroughputSettings } from '@betterdb/shared';
+import type { StoredMemorySnapshot } from '../../common/interfaces/storage-port.interface';
+
+// ── Test Helpers ──
+
+function mockGlobalSettings(overrides?: Partial<AppSettings>): AppSettings {
+  return {
+    id: 1,
+    auditPollIntervalMs: 60000,
+    clientAnalyticsPollIntervalMs: 60000,
+    anomalyPollIntervalMs: 1000,
+    anomalyCacheTtlMs: 3600000,
+    anomalyPrometheusIntervalMs: 30000,
+    throughputForecastingEnabled: true,
+    throughputForecastingDefaultRollingWindowMs: 21600000,
+    throughputForecastingDefaultAlertThresholdMs: 7200000,
+    createdAt: Date.now(),
+    updatedAt: Date.now(),
+    ...overrides,
+  };
+}
+
+function makeThroughputSettings(overrides?: Partial<ThroughputSettings>): ThroughputSettings {
+  return {
+    connectionId: 'conn-1',
+    enabled: true,
+    opsCeiling: null,
+    rollingWindowMs: 21600000,
+    alertThresholdMs: 7200000,
+    updatedAt: Date.now(),
+    ...overrides,
+  };
+}
+
+function generateSnapshots(opts: {
+  count: number;
+  startTime: number;
+  intervalMs: number;
+  startOps: number;
+  endOps: number;
+  connectionId?: string;
+}): StoredMemorySnapshot[] {
+  const snapshots: StoredMemorySnapshot[] = [];
+  for (let i = 0; i < opts.count; i++) {
+    const t = i / (opts.count - 1);
+    snapshots.push({
+      id: `snap-${i}`,
+      timestamp: opts.startTime + i * opts.intervalMs,
+      usedMemory: 1000000,
+      usedMemoryRss: 1200000,
+      usedMemoryPeak: 1500000,
+      memFragmentationRatio: 1.2,
+      maxmemory: 0,
+      allocatorFragRatio: 1.0,
+      opsPerSec: Math.round(opts.startOps + t * (opts.endOps - opts.startOps)),
+      cpuSys: 1.0,
+      cpuUser: 2.0,
+      ioThreadedReads: 0,
+      ioThreadedWrites: 0,
+      connectionId: opts.connectionId ??
'conn-1', + }); + } + return snapshots; +} + +// ── Test Suite ── + +describe('ThroughputForecastingService', () => { + let service: ThroughputForecastingService; + let storage: MemoryAdapter; + let settingsService: { getCachedSettings: jest.Mock }; + + beforeEach(async () => { + storage = new MemoryAdapter(); + await storage.initialize(); + + settingsService = { + getCachedSettings: jest.fn().mockReturnValue(mockGlobalSettings()), + }; + + const module: TestingModule = await Test.createTestingModule({ + providers: [ + ThroughputForecastingService, + { provide: 'STORAGE_CLIENT', useValue: storage }, + { provide: SettingsService, useValue: settingsService }, + { + provide: ConnectionRegistry, + useValue: { list: jest.fn().mockReturnValue([]), getConfig: jest.fn() }, + }, + ], + }).compile(); + + service = module.get(ThroughputForecastingService); + }); + + // ── Slice 1: Storage Round-Trip ── + + describe('Slice 1: Storage round-trip', () => { + it('1a: saves and retrieves throughput settings', async () => { + const settings = makeThroughputSettings({ connectionId: 'conn-1', opsCeiling: 80000 }); + await storage.saveThroughputSettings(settings); + const result = await storage.getThroughputSettings('conn-1'); + expect(result).not.toBeNull(); + expect(result!.connectionId).toBe('conn-1'); + expect(result!.opsCeiling).toBe(80000); + expect(result!.enabled).toBe(true); + expect(result!.rollingWindowMs).toBe(21600000); + }); + + it('1b: returns null for missing connection', async () => { + const result = await storage.getThroughputSettings('conn-unknown'); + expect(result).toBeNull(); + }); + + it('1c: upsert overwrites existing settings', async () => { + await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 50000 })); + await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 90000 })); + const result = await storage.getThroughputSettings('conn-1'); + expect(result!.opsCeiling).toBe(90000); + }); + + it('1d: delete removes settings 
and returns true', async () => { + await storage.saveThroughputSettings(makeThroughputSettings()); + const deleted = await storage.deleteThroughputSettings('conn-1'); + expect(deleted).toBe(true); + const result = await storage.getThroughputSettings('conn-1'); + expect(result).toBeNull(); + }); + + it('1e: delete non-existent returns false', async () => { + const deleted = await storage.deleteThroughputSettings('conn-unknown'); + expect(deleted).toBe(false); + }); + + it('1f: getActiveThroughputSettings filters correctly', async () => { + await storage.saveThroughputSettings( + makeThroughputSettings({ connectionId: 'conn-a', enabled: true, opsCeiling: 80000 }), + ); + await storage.saveThroughputSettings( + makeThroughputSettings({ connectionId: 'conn-b', enabled: true, opsCeiling: null }), + ); + await storage.saveThroughputSettings( + makeThroughputSettings({ connectionId: 'conn-c', enabled: false, opsCeiling: 80000 }), + ); + const active = await storage.getActiveThroughputSettings(); + expect(active).toHaveLength(1); + expect(active[0].connectionId).toBe('conn-a'); + }); + }); + + // ── Slice 2: Rising Trend, No Ceiling ── + + describe('Slice 2: Rising trend, no ceiling', () => { + it('2a: returns rising trend with correct direction and growth', async () => { + const now = Date.now(); + const snapshots = generateSnapshots({ + count: 60, + startTime: now - 60 * 60_000, + intervalMs: 60_000, + startOps: 10_000, + endOps: 20_000, + connectionId: 'conn-1', + }); + await storage.saveMemorySnapshots(snapshots, 'conn-1'); + + const forecast = await service.getForecast('conn-1'); + + expect(forecast.mode).toBe('trend'); + expect(forecast.trendDirection).toBe('rising'); + expect(forecast.growthPercent).toBeGreaterThan(5); + expect(forecast.timeToLimitMs).toBeNull(); + expect(forecast.opsCeiling).toBeNull(); + expect(forecast.currentOpsPerSec).toBeGreaterThanOrEqual(19_000); + expect(forecast.insufficientData).toBe(false); + expect(forecast.enabled).toBe(true); + 
expect(forecast.dataPointCount).toBe(60); + }); + }); + + // ── Slice 3: Rising Trend with Ceiling ── + + describe('Slice 3: Rising trend with ceiling', () => { + it('3a: returns forecast with time-to-limit', async () => { + const now = Date.now(); + const snapshots = generateSnapshots({ + count: 60, + startTime: now - 60 * 60_000, + intervalMs: 60_000, + startOps: 40_000, + endOps: 50_000, + connectionId: 'conn-1', + }); + await storage.saveMemorySnapshots(snapshots, 'conn-1'); + await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 80_000 })); + + const forecast = await service.getForecast('conn-1'); + + expect(forecast.mode).toBe('forecast'); + expect(forecast.timeToLimitMs).toBeGreaterThan(0); + expect(forecast.timeToLimitHuman).toContain('at current growth rate'); + expect(forecast.opsCeiling).toBe(80_000); + }); + + it('3b: time-to-limit is approximately correct', async () => { + const now = Date.now(); + // Growth: 10k/hr, current ~50k, ceiling 80k => ~3h to limit + const snapshots = generateSnapshots({ + count: 60, + startTime: now - 60 * 60_000, + intervalMs: 60_000, + startOps: 40_000, + endOps: 50_000, + connectionId: 'conn-1', + }); + await storage.saveMemorySnapshots(snapshots, 'conn-1'); + await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 80_000 })); + + const forecast = await service.getForecast('conn-1'); + const threeHoursMs = 3 * 3_600_000; + + expect(forecast.timeToLimitMs).toBeGreaterThan(threeHoursMs * 0.8); + expect(forecast.timeToLimitMs).toBeLessThan(threeHoursMs * 1.2); + }); + }); + + // ── Slice 4: Falling/Stable Trend with Ceiling ── + + describe('Slice 4: Falling/stable trend with ceiling', () => { + it('4a: falling trend returns not projected', async () => { + const now = Date.now(); + const snapshots = generateSnapshots({ + count: 60, + startTime: now - 60 * 60_000, + intervalMs: 60_000, + startOps: 50_000, + endOps: 40_000, + connectionId: 'conn-1', + }); + await 
storage.saveMemorySnapshots(snapshots, 'conn-1'); + await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 80_000 })); + + const forecast = await service.getForecast('conn-1'); + + expect(forecast.mode).toBe('forecast'); + expect(forecast.trendDirection).toBe('falling'); + expect(forecast.timeToLimitMs).toBeNull(); + expect(forecast.timeToLimitHuman).toContain('Not projected'); + }); + + it('4b: stable trend returns not projected', async () => { + const now = Date.now(); + const snapshots = generateSnapshots({ + count: 60, + startTime: now - 60 * 60_000, + intervalMs: 60_000, + startOps: 50_000, + endOps: 50_000, + connectionId: 'conn-1', + }); + await storage.saveMemorySnapshots(snapshots, 'conn-1'); + await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 80_000 })); + + const forecast = await service.getForecast('conn-1'); + + expect(forecast.mode).toBe('forecast'); + expect(forecast.trendDirection).toBe('stable'); + expect(forecast.timeToLimitMs).toBeNull(); + }); + }); + + // ── Slice 5: Ceiling Already Exceeded ── + + describe('Slice 5: Ceiling already exceeded', () => { + it('5a: returns exceeded when ops above ceiling', async () => { + const now = Date.now(); + const snapshots = generateSnapshots({ + count: 60, + startTime: now - 60 * 60_000, + intervalMs: 60_000, + startOps: 85_000, + endOps: 90_000, + connectionId: 'conn-1', + }); + await storage.saveMemorySnapshots(snapshots, 'conn-1'); + await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 80_000 })); + + const forecast = await service.getForecast('conn-1'); + + expect(forecast.mode).toBe('forecast'); + expect(forecast.timeToLimitHuman).toMatch(/exceeded/i); + }); + }); + + // ── Slice 6: Insufficient Data ── + + describe('Slice 6: Insufficient data', () => { + it('6a: no snapshots returns insufficient data', async () => { + const forecast = await service.getForecast('conn-1'); + expect(forecast.insufficientData).toBe(true); + 
expect(forecast.insufficientDataMessage).toBeDefined(); + }); + + it('6b: only 2 snapshots returns insufficient', async () => { + const now = Date.now(); + const snapshots = generateSnapshots({ + count: 2, + startTime: now - 10 * 60_000, + intervalMs: 5 * 60_000, + startOps: 10_000, + endOps: 20_000, + connectionId: 'conn-1', + }); + await storage.saveMemorySnapshots(snapshots, 'conn-1'); + + const forecast = await service.getForecast('conn-1'); + expect(forecast.insufficientData).toBe(true); + }); + + it('6c: 5 snapshots but < 30 min span returns insufficient', async () => { + const now = Date.now(); + const snapshots = generateSnapshots({ + count: 5, + startTime: now - 20 * 60_000, + intervalMs: 5 * 60_000, + startOps: 10_000, + endOps: 20_000, + connectionId: 'conn-1', + }); + await storage.saveMemorySnapshots(snapshots, 'conn-1'); + + const forecast = await service.getForecast('conn-1'); + expect(forecast.insufficientData).toBe(true); + }); + + it('6d: exactly 30 min is sufficient', async () => { + const now = Date.now(); + const snapshots = generateSnapshots({ + count: 31, + startTime: now - 30 * 60_000, + intervalMs: 60_000, + startOps: 10_000, + endOps: 20_000, + connectionId: 'conn-1', + }); + await storage.saveMemorySnapshots(snapshots, 'conn-1'); + + const forecast = await service.getForecast('conn-1'); + expect(forecast.insufficientData).toBe(false); + }); + + it('6e: insufficient data still returns currentOpsPerSec', async () => { + const now = Date.now(); + const snapshots = generateSnapshots({ + count: 2, + startTime: now - 10 * 60_000, + intervalMs: 5 * 60_000, + startOps: 40_000, + endOps: 45_000, + connectionId: 'conn-1', + }); + await storage.saveMemorySnapshots(snapshots, 'conn-1'); + + const forecast = await service.getForecast('conn-1'); + expect(forecast.insufficientData).toBe(true); + expect(forecast.currentOpsPerSec).toBe(45_000); + }); + }); + + // ── Slice 7: Lazy Settings Creation ── + + describe('Slice 7: Lazy settings creation', () => { 
+ it('7a: first access creates row from global defaults', async () => { + settingsService.getCachedSettings.mockReturnValue( + mockGlobalSettings({ throughputForecastingDefaultRollingWindowMs: 43200000 }), + ); + + const settings = await service.getSettings('conn-1'); + + expect(settings.rollingWindowMs).toBe(43200000); + expect(settings.enabled).toBe(true); + expect(settings.opsCeiling).toBeNull(); + + // Verify row was persisted + const persisted = await storage.getThroughputSettings('conn-1'); + expect(persisted).not.toBeNull(); + expect(persisted!.rollingWindowMs).toBe(43200000); + }); + + it('7b: global disabled returns disabled settings without persisting', async () => { + settingsService.getCachedSettings.mockReturnValue( + mockGlobalSettings({ throughputForecastingEnabled: false }), + ); + + const settings = await service.getSettings('conn-1'); + + expect(settings.enabled).toBe(false); + + // Verify no row was persisted + const persisted = await storage.getThroughputSettings('conn-1'); + expect(persisted).toBeNull(); + }); + }); + + // ── Slice 8: Update Settings and Cache Invalidation ── + + describe('Slice 8: Update settings', () => { + it('8a: update merges with existing settings', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, + startTime: now - 60 * 60_000, + intervalMs: 60_000, + startOps: 10_000, + endOps: 20_000, + connectionId: 'conn-1', + }), + 'conn-1', + ); + + const updated = await service.updateSettings('conn-1', { opsCeiling: 80_000 }); + + expect(updated.opsCeiling).toBe(80_000); + expect(updated.rollingWindowMs).toBe(21600000); // unchanged default + }); + + it('8b: update invalidates forecast cache', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, + startTime: now - 60 * 60_000, + intervalMs: 60_000, + startOps: 40_000, + endOps: 50_000, + connectionId: 'conn-1', + }), + 'conn-1', + ); + + // First forecast: 
trend mode (no ceiling) + const first = await service.getForecast('conn-1'); + expect(first.mode).toBe('trend'); + + // Update settings with a ceiling + await service.updateSettings('conn-1', { opsCeiling: 80_000 }); + + // Second forecast should reflect new ceiling, not cached result + const second = await service.getForecast('conn-1'); + expect(second.mode).toBe('forecast'); + }); + }); + + // ── Slice 9: Per-Connection Disabled ── + + describe('Slice 9: Per-connection disabled', () => { + it('9a: disabled connection returns enabled false', async () => { + await storage.saveThroughputSettings(makeThroughputSettings({ enabled: false })); + + const forecast = await service.getForecast('conn-1'); + + expect(forecast.enabled).toBe(false); + }); + + it('9b: re-enable returns valid forecast', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, + startTime: now - 60 * 60_000, + intervalMs: 60_000, + startOps: 10_000, + endOps: 20_000, + connectionId: 'conn-1', + }), + 'conn-1', + ); + await storage.saveThroughputSettings(makeThroughputSettings({ enabled: false })); + + // Disable returns enabled false + const disabled = await service.getForecast('conn-1'); + expect(disabled.enabled).toBe(false); + + // Re-enable + await service.updateSettings('conn-1', { enabled: true }); + const enabled = await service.getForecast('conn-1'); + expect(enabled.enabled).toBe(true); + expect(enabled.insufficientData).toBe(false); + }); + }); + + // ── Slice 10: Forecast Cache ── + + describe('Slice 10: Forecast cache', () => { + beforeEach(() => { + jest.useFakeTimers(); + }); + + afterEach(() => { + jest.useRealTimers(); + }); + + it('10a: second call within TTL uses cache', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, + startTime: now - 60 * 60_000, + intervalMs: 60_000, + startOps: 10_000, + endOps: 20_000, + connectionId: 'conn-1', + }), + 'conn-1', + ); + + 
const spy = jest.spyOn(storage, 'getMemorySnapshots');
+
+      await service.getForecast('conn-1');
+      await service.getForecast('conn-1');
+
+      expect(spy.mock.calls.length).toBe(1);
+    });
+
+    it('10b: call after TTL expires recomputes', async () => {
+      const now = Date.now();
+      await storage.saveMemorySnapshots(
+        generateSnapshots({
+          count: 60,
+          startTime: now - 60 * 60_000,
+          intervalMs: 60_000,
+          startOps: 10_000,
+          endOps: 20_000,
+          connectionId: 'conn-1',
+        }),
+        'conn-1',
+      );
+
+      const spy = jest.spyOn(storage, 'getMemorySnapshots');
+
+      await service.getForecast('conn-1');
+      jest.advanceTimersByTime(61_000);
+      await service.getForecast('conn-1');
+
+      expect(spy.mock.calls.length).toBe(2);
+    });
+  });
+});
diff --git a/apps/api/src/throughput-forecasting/throughput-forecasting.controller.ts b/apps/api/src/throughput-forecasting/throughput-forecasting.controller.ts
new file mode 100644
index 00000000..9727c427
--- /dev/null
+++ b/apps/api/src/throughput-forecasting/throughput-forecasting.controller.ts
@@ -0,0 +1,27 @@
+import { Controller, Get, Put, Body } from '@nestjs/common';
+import { ThroughputForecastingService } from './throughput-forecasting.service';
+import { ConnectionId } from '../common/decorators/connection-id.decorator';
+import type { ThroughputForecast, ThroughputSettings, ThroughputSettingsUpdate } from '@betterdb/shared';
+
+@Controller('throughput-forecasting')
+export class ThroughputForecastingController {
+  constructor(private readonly service: ThroughputForecastingService) {}
+
+  @Get('forecast')
+  async getForecast(@ConnectionId() connectionId?: string): Promise<ThroughputForecast> {
+    return this.service.getForecast(connectionId || 'env-default');
+  }
+
+  @Get('settings')
+  async getSettings(@ConnectionId() connectionId?: string): Promise<ThroughputSettings> {
+    return this.service.getSettings(connectionId || 'env-default');
+  }
+
+  @Put('settings')
+  async updateSettings(
+    @ConnectionId() connectionId?: string,
+    @Body() updates?: ThroughputSettingsUpdate,
+  ): Promise<ThroughputSettings> {
return this.service.updateSettings(connectionId || 'env-default', updates || {}); + } +} diff --git a/apps/api/src/throughput-forecasting/throughput-forecasting.module.ts b/apps/api/src/throughput-forecasting/throughput-forecasting.module.ts new file mode 100644 index 00000000..361ac30b --- /dev/null +++ b/apps/api/src/throughput-forecasting/throughput-forecasting.module.ts @@ -0,0 +1,13 @@ +import { Module } from '@nestjs/common'; +import { StorageModule } from '../storage/storage.module'; +import { ConnectionsModule } from '../connections/connections.module'; +import { ThroughputForecastingService } from './throughput-forecasting.service'; +import { ThroughputForecastingController } from './throughput-forecasting.controller'; + +@Module({ + imports: [StorageModule, ConnectionsModule], + providers: [ThroughputForecastingService], + controllers: [ThroughputForecastingController], + exports: [ThroughputForecastingService], +}) +export class ThroughputForecastingModule {} diff --git a/apps/api/src/throughput-forecasting/throughput-forecasting.service.ts b/apps/api/src/throughput-forecasting/throughput-forecasting.service.ts new file mode 100644 index 00000000..de6d4aad --- /dev/null +++ b/apps/api/src/throughput-forecasting/throughput-forecasting.service.ts @@ -0,0 +1,340 @@ +import { + Inject, + Injectable, + Logger, + OnModuleDestroy, + OnModuleInit, + Optional, +} from '@nestjs/common'; +import type { StoragePort } from '../common/interfaces/storage-port.interface'; +import { SettingsService } from '../settings/settings.service'; +import { ConnectionRegistry } from '../connections/connection-registry.service'; +import type { + ThroughputForecast, + ThroughputSettings, + ThroughputSettingsUpdate, +} from '@betterdb/shared'; +import { WEBHOOK_EVENTS_PRO_SERVICE, type IWebhookEventsProService } from '@betterdb/shared'; + +const MIN_DATA_POINTS = 3; +const MIN_TIME_SPAN_MS = 30 * 60_000; // 30 minutes +const TREND_THRESHOLD_PERCENT = 5; +const CACHE_TTL_MS = 60_000; 
+const ALERT_CHECK_INTERVAL_MS = 60_000;
+
+@Injectable()
+export class ThroughputForecastingService implements OnModuleInit, OnModuleDestroy {
+  private readonly logger = new Logger(ThroughputForecastingService.name);
+  private forecastCache = new Map<string, { forecast: ThroughputForecast; computedAt: number }>();
+  private alertInterval: ReturnType<typeof setInterval> | null = null;
+
+  constructor(
+    @Inject('STORAGE_CLIENT') private readonly storage: StoragePort,
+    private readonly settingsService: SettingsService,
+    private readonly connectionRegistry: ConnectionRegistry,
+    @Optional()
+    @Inject(WEBHOOK_EVENTS_PRO_SERVICE)
+    private readonly webhookEventsProService?: IWebhookEventsProService,
+  ) {}
+
+  onModuleInit(): void {
+    if (this.webhookEventsProService) {
+      this.logger.log('Enabling throughput forecasting webhook alerts');
+      this.alertInterval = setInterval(() => this.checkAlerts(), ALERT_CHECK_INTERVAL_MS);
+    }
+  }
+
+  onModuleDestroy(): void {
+    if (this.alertInterval) {
+      this.logger.log('Disabling throughput forecasting webhook alerts');
+      clearInterval(this.alertInterval);
+      this.alertInterval = null;
+    }
+  }
+
+  async getForecast(connectionId: string): Promise<ThroughputForecast> {
+    // Check cache
+    const cached = this.forecastCache.get(connectionId);
+    if (cached && Date.now() - cached.computedAt < CACHE_TTL_MS) {
+      return cached.forecast;
+    }
+
+    // Check global toggle
+    const globalSettings = this.settingsService.getCachedSettings();
+    if (!globalSettings.throughputForecastingEnabled) {
+      return this.buildDisabledForecast(connectionId);
+    }
+
+    // Check per-connection settings
+    const settings = await this.getOrCreateSettings(connectionId);
+    if (!settings.enabled) {
+      return this.buildDisabledForecast(connectionId);
+    }
+
+    // Query snapshots
+    const now = Date.now();
+    const snapshots = await this.storage.getMemorySnapshots({
+      connectionId,
+      startTime: now - settings.rollingWindowMs,
+      limit: 1500,
+    });
+
+    // Reverse to ascending (query returns DESC)
+    const sorted = [...snapshots].reverse();
+
+    // Check sufficient data
+    const
latestOps = sorted.length > 0 ? sorted[sorted.length - 1].opsPerSec : 0; + if (sorted.length < MIN_DATA_POINTS) { + return this.buildInsufficientForecast(connectionId, settings, latestOps); + } + const timeSpan = sorted[sorted.length - 1].timestamp - sorted[0].timestamp; + if (timeSpan < MIN_TIME_SPAN_MS) { + return this.buildInsufficientForecast(connectionId, settings, latestOps); + } + + // Linear regression + const points = sorted.map((s) => ({ x: s.timestamp, y: s.opsPerSec })); + const { slope, intercept } = this.linearRegression(points); + + // Compute metrics + const windowStart = sorted[0].timestamp; + const windowEnd = sorted[sorted.length - 1].timestamp; + const predictedStart = slope * windowStart + intercept; + const predictedEnd = slope * windowEnd + intercept; + const currentOpsPerSec = latestOps; + const growthRate = slope * 3_600_000; // ops/sec per hour + const growthPercent = + predictedStart !== 0 ? ((predictedEnd - predictedStart) / Math.abs(predictedStart)) * 100 : 0; + + const trendDirection: 'rising' | 'falling' | 'stable' = + growthPercent > TREND_THRESHOLD_PERCENT + ? 'rising' + : growthPercent < -TREND_THRESHOLD_PERCENT + ? 
'falling'
+        : 'stable';
+
+    const baseForecast = {
+      connectionId,
+      currentOpsPerSec,
+      growthRate,
+      growthPercent,
+      trendDirection,
+      dataPointCount: sorted.length,
+      windowMs: settings.rollingWindowMs,
+      enabled: true,
+      insufficientData: false,
+    };
+
+    let forecast: ThroughputForecast;
+
+    if (settings.opsCeiling === null) {
+      // Trend mode
+      forecast = {
+        ...baseForecast,
+        mode: 'trend',
+        opsCeiling: null,
+        timeToLimitMs: null,
+        timeToLimitHuman: this.formatTrendSummary(
+          growthPercent,
+          trendDirection,
+          settings.rollingWindowMs,
+        ),
+      };
+    } else {
+      // Forecast mode
+      const currentPredicted = slope * now + intercept;
+
+      if (currentPredicted >= settings.opsCeiling) {
+        forecast = {
+          ...baseForecast,
+          mode: 'forecast',
+          opsCeiling: settings.opsCeiling,
+          timeToLimitMs: 0,
+          timeToLimitHuman: 'Ceiling already exceeded',
+        };
+      } else if (trendDirection !== 'rising' || slope <= 0) {
+        forecast = {
+          ...baseForecast,
+          mode: 'forecast',
+          opsCeiling: settings.opsCeiling,
+          timeToLimitMs: null,
+          timeToLimitHuman: 'Not projected to reach ceiling',
+        };
+      } else {
+        const timeToLimitMs = (settings.opsCeiling - currentPredicted) / slope;
+        forecast = {
+          ...baseForecast,
+          mode: 'forecast',
+          opsCeiling: settings.opsCeiling,
+          timeToLimitMs,
+          timeToLimitHuman: this.formatTimeToLimit(timeToLimitMs),
+        };
+      }
+    }
+
+    // Cache
+    this.forecastCache.set(connectionId, { forecast, computedAt: Date.now() });
+    return forecast;
+  }
+
+  async getSettings(connectionId: string): Promise<ThroughputSettings> {
+    return this.getOrCreateSettings(connectionId);
+  }
+
+  async updateSettings(
+    connectionId: string,
+    updates: ThroughputSettingsUpdate,
+  ): Promise<ThroughputSettings> {
+    const current = await this.getOrCreateSettings(connectionId);
+    const merged: ThroughputSettings = {
+      ...current,
+      ...updates,
+      connectionId,
+      updatedAt: Date.now(),
+    };
+    const saved = await this.storage.saveThroughputSettings(merged);
+    this.forecastCache.delete(connectionId);
+    return saved;
+  }
+
+  private async
getOrCreateSettings(connectionId: string): Promise<ThroughputSettings> {
+    const existing = await this.storage.getThroughputSettings(connectionId);
+    if (existing) return existing;
+
+    const globalSettings = this.settingsService.getCachedSettings();
+    if (!globalSettings.throughputForecastingEnabled) {
+      return {
+        connectionId,
+        enabled: false,
+        opsCeiling: null,
+        rollingWindowMs: globalSettings.throughputForecastingDefaultRollingWindowMs,
+        alertThresholdMs: globalSettings.throughputForecastingDefaultAlertThresholdMs,
+        updatedAt: Date.now(),
+      };
+    }
+
+    const newSettings: ThroughputSettings = {
+      connectionId,
+      enabled: true,
+      opsCeiling: null,
+      rollingWindowMs: globalSettings.throughputForecastingDefaultRollingWindowMs,
+      alertThresholdMs: globalSettings.throughputForecastingDefaultAlertThresholdMs,
+      updatedAt: Date.now(),
+    };
+    return this.storage.saveThroughputSettings(newSettings);
+  }
+
+  private linearRegression(points: { x: number; y: number }[]): {
+    slope: number;
+    intercept: number;
+  } {
+    const n = points.length;
+    if (n === 0) return { slope: 0, intercept: 0 };
+    if (n === 1) return { slope: 0, intercept: points[0].y };
+
+    let sumX = 0,
+      sumY = 0,
+      sumXY = 0,
+      sumX2 = 0;
+    for (const p of points) {
+      sumX += p.x;
+      sumY += p.y;
+      sumXY += p.x * p.y;
+      sumX2 += p.x * p.x;
+    }
+    const denom = n * sumX2 - sumX * sumX;
+    if (denom === 0) return { slope: 0, intercept: sumY / n };
+    const slope = (n * sumXY - sumX * sumY) / denom;
+    const intercept = (sumY - slope * sumX) / n;
+    return { slope, intercept };
+  }
+
+  private formatTimeToLimit(ms: number): string {
+    if (ms < 3_600_000) return `~${Math.round(ms / 60_000)}m at current growth rate`;
+    if (ms < 86_400_000) return `~${(ms / 3_600_000).toFixed(1)}h at current growth rate`;
+    return `~${(ms / 86_400_000).toFixed(1)}d at current growth rate`;
+  }
+
+  private formatTrendSummary(growthPercent: number, direction: string, windowMs: number): string {
+    const windowHours = windowMs / 3_600_000;
+    const sign =
growthPercent >= 0 ? '+' : ''; + return `${sign}${growthPercent.toFixed(1)}% over ${windowHours}h, ${direction}`; + } + + private buildDisabledForecast(connectionId: string): ThroughputForecast { + return { + connectionId, + mode: 'trend', + currentOpsPerSec: 0, + growthRate: 0, + growthPercent: 0, + trendDirection: 'stable', + dataPointCount: 0, + windowMs: 0, + opsCeiling: null, + timeToLimitMs: null, + timeToLimitHuman: '', + enabled: false, + insufficientData: false, + }; + } + + private buildInsufficientForecast( + connectionId: string, + settings: ThroughputSettings, + currentOpsPerSec: number, + ): ThroughputForecast { + return { + connectionId, + mode: 'trend', + currentOpsPerSec, + growthRate: 0, + growthPercent: 0, + trendDirection: 'stable', + dataPointCount: 0, + windowMs: settings.rollingWindowMs, + opsCeiling: settings.opsCeiling, + timeToLimitMs: null, + timeToLimitHuman: '', + enabled: true, + insufficientData: true, + insufficientDataMessage: + 'Need at least 30 minutes of monitoring history. 
Data will be available shortly.',
+    };
+  }
+
+  private async checkAlerts(): Promise<void> {
+    if (!this.webhookEventsProService) return;
+
+    const globalSettings = this.settingsService.getCachedSettings();
+    if (!globalSettings.throughputForecastingEnabled) return;
+
+    try {
+      const activeSettings = await this.storage.getActiveThroughputSettings();
+      for (const settings of activeSettings) {
+        const forecast = await this.getForecast(settings.connectionId);
+        if (
+          forecast.timeToLimitMs !== null &&
+          forecast.timeToLimitMs > 0 &&
+          forecast.opsCeiling !== null
+        ) {
+          const config = this.connectionRegistry.getConfig(settings.connectionId);
+          if (config) {
+            await this.webhookEventsProService.dispatchThroughputLimit({
+              currentOpsPerSec: forecast.currentOpsPerSec,
+              opsCeiling: forecast.opsCeiling,
+              timeToLimitMs: forecast.timeToLimitMs,
+              threshold: settings.alertThresholdMs,
+              growthRate: forecast.growthRate,
+              timestamp: Date.now(),
+              instance: { host: config.host, port: config.port },
+              connectionId: settings.connectionId,
+            });
+          }
+        }
+      }
+    } catch (err) {
+      this.logger.warn(`Alert check failed: ${err instanceof Error ?
err.message : err}`); + } + } +} diff --git a/apps/web/src/App.tsx b/apps/web/src/App.tsx index 042d968e..e7113cb6 100644 --- a/apps/web/src/App.tsx +++ b/apps/web/src/App.tsx @@ -28,9 +28,11 @@ import { ClusterDashboard } from './pages/ClusterDashboard'; import { Settings } from './pages/Settings'; import { Webhooks } from './pages/Webhooks'; import { VectorSearch } from './pages/VectorSearch'; +import { ThroughputForecasting } from './pages/ThroughputForecasting'; import { Members } from './pages/Members'; import { workspaceApi, CloudUser } from './api/workspace'; import { Feature } from '@betterdb/shared'; +import { settingsApi } from './api/settings'; function App() { return ( @@ -96,9 +98,16 @@ function AppLayout({ cloudUser }: { cloudUser: CloudUser | null }) { const location = useLocation(); const { hasVectorSearch } = useCapabilities(); const [showFeedback, setShowFeedback] = useState(false); + const [throughputForecastingEnabled, setThroughputForecastingEnabled] = useState(true); useIdleTracker(); useNavigationTracker(); + useEffect(() => { + settingsApi.getSettings() + .then((res) => setThroughputForecastingEnabled(res.settings.throughputForecastingEnabled ?? true)) + .catch(() => {}); + }, [location.pathname]); + return (
)} + {activeCategory === 'throughputForecasting' && ( +
+

Throughput Forecasting

+

+ These defaults are applied when throughput forecasting is first activated for a connection. + Per-connection settings can be customized on the Throughput Forecast page. +

+ +
+ + +
+ +
+ + +
+ +
+ + +
+
+ )} + {activeCategory === 'mcpTokens' && (

MCP Tokens

diff --git a/apps/web/src/pages/ThroughputForecasting.tsx b/apps/web/src/pages/ThroughputForecasting.tsx new file mode 100644 index 00000000..e8b4568b --- /dev/null +++ b/apps/web/src/pages/ThroughputForecasting.tsx @@ -0,0 +1,362 @@ +import { useState, useEffect, useCallback, useRef } from 'react'; +import { useConnection } from '../hooks/useConnection'; +import { usePolling } from '../hooks/usePolling'; +import { metricsApi } from '../api/metrics'; +import { Card } from '../components/ui/card'; +import type { ThroughputForecast, ThroughputSettings, ThroughputSettingsUpdate } from '../types/throughput'; +import { + LineChart, + Line, + XAxis, + YAxis, + Tooltip, + ReferenceLine, + ResponsiveContainer, + CartesianGrid, +} from 'recharts'; + +const WINDOW_PRESETS = [ + { label: '1h', value: 3600000 }, + { label: '3h', value: 10800000 }, + { label: '6h', value: 21600000 }, + { label: '12h', value: 43200000 }, + { label: '24h', value: 86400000 }, +]; + +const ALERT_PRESETS = [ + { label: '30m', value: 1800000 }, + { label: '1h', value: 3600000 }, + { label: '2h', value: 7200000 }, + { label: '4h', value: 14400000 }, +]; + +function formatOps(value: number): string { + if (value >= 1_000_000) return `${(value / 1_000_000).toFixed(1)}M`; + if (value >= 1_000) return `${(value / 1_000).toFixed(1)}K`; + return value.toString(); +} + +function formatTime(timestamp: number): string { + return new Date(timestamp).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' }); +} + +export function ThroughputForecasting() { + const { currentConnection } = useConnection(); + const [settings, setSettings] = useState(null); + const [saveStatus, setSaveStatus] = useState<'idle' | 'saved' | 'error'>('idle'); + const saveTimeout = useRef>(undefined); + const debounceTimeout = useRef>(undefined); + + const { data: forecast, refresh: refreshForecast } = usePolling({ + fetcher: (signal?: AbortSignal) => metricsApi.getThroughputForecast(signal), + interval: 30_000, + enabled: true, + 
refetchKey: currentConnection?.id, + }); + + // Load settings + useEffect(() => { + metricsApi.getThroughputSettings().then(setSettings).catch(() => {}); + }, [currentConnection?.id]); // eslint-disable-line react-hooks/exhaustive-deps + + // Chart data + const [chartData, setChartData] = useState>([]); + useEffect(() => { + if (!settings) return; + const now = Date.now(); + metricsApi + .getStoredMemorySnapshots({ startTime: now - settings.rollingWindowMs, limit: 1500 }) + .then((snapshots) => { + const sorted = [...snapshots].sort((a, b) => a.timestamp - b.timestamp); + setChartData(sorted.map((s) => ({ time: s.timestamp, ops: s.opsPerSec, label: formatTime(s.timestamp) }))); + }) + .catch(() => {}); + }, [settings?.rollingWindowMs, currentConnection?.id, forecast]); + + const updateSetting = useCallback( + (updates: ThroughputSettingsUpdate) => { + if (debounceTimeout.current) clearTimeout(debounceTimeout.current); + setSettings((prev: ThroughputSettings | null) => (prev ? { ...prev, ...updates, updatedAt: Date.now() } : prev)); + + debounceTimeout.current = setTimeout(async () => { + try { + const updated = await metricsApi.updateThroughputSettings(updates); + setSettings(updated); + setSaveStatus('saved'); + refreshForecast(); + if (saveTimeout.current) clearTimeout(saveTimeout.current); + saveTimeout.current = setTimeout(() => setSaveStatus('idle'), 2000); + } catch { + setSaveStatus('error'); + } + }, 500); + }, + [refreshForecast], + ); + + // ── Page States ── + + if (!forecast || !settings) { + return ( +
+

Throughput Forecast

+

Loading...

+
+ ); + } + + if (!forecast.enabled) { + return ( +
+

Throughput Forecast

+ +
+

Throughput forecasting is disabled for this connection.

+ +
+
+ +
+ ); + } + + return ( +
+

Throughput Forecast

+ + + + {forecast.insufficientData ? ( + +

{forecast.insufficientDataMessage}

+ {forecast.currentOpsPerSec > 0 && ( +

{formatOps(forecast.currentOpsPerSec)} ops/sec

+ )} +
+ ) : ( + <> + + + + )} +
+ ); +} + +// ── Settings Panel ── + +function SettingsPanel({ + settings, + onUpdate, + saveStatus, +}: { + settings: ThroughputSettings; + onUpdate: (u: ThroughputSettingsUpdate) => void; + saveStatus: 'idle' | 'saved' | 'error'; +}) { + return ( + +
+

Settings

+
+ {saveStatus === 'saved' && Saved} + {saveStatus === 'error' && Error saving} +
+
+
+
+ + +
+ +
+ + onUpdate({ opsCeiling: e.target.value ? parseInt(e.target.value) : null })} + className="w-full px-3 py-2 border rounded-md" + /> +
+ +
+ + +
+
+
+ ); +} + +// ── Forecast Card ── + +function ForecastCard({ forecast }: { forecast: ThroughputForecast }) { + const directionArrow = forecast.trendDirection === 'rising' ? '\u2197' : forecast.trendDirection === 'falling' ? '\u2198' : '\u2192'; + + return ( + +

+ {forecast.mode === 'forecast' ? 'Throughput Forecast' : 'Throughput Trend'} +

+ + {forecast.mode === 'forecast' && forecast.timeToLimitHuman && ( +

{forecast.timeToLimitHuman}

+ )} + +
+
+

Current

+

{formatOps(forecast.currentOpsPerSec)} ops/sec

+
+ {forecast.opsCeiling && ( +
+

Ceiling

+

{formatOps(forecast.opsCeiling)} ops/sec

+
+ )} +
+

Growth Rate

+

{forecast.growthRate >= 0 ? '+' : ''}{formatOps(Math.round(forecast.growthRate))}/hr

+
+
+

Trend

+

+ {directionArrow} {forecast.trendDirection} ({forecast.growthPercent >= 0 ? '+' : ''}{forecast.growthPercent.toFixed(1)}%) +

+
+
+
+ ); +} + +// ── Chart ── + +function ThroughputChart({ + chartData, + forecast, + settings, +}: { + chartData: Array<{ time: number; ops: number; label: string }>; + forecast: ThroughputForecast; + settings: ThroughputSettings; +}) { + if (chartData.length === 0) return null; + + // Build trend line data + const trendData: Array<{ time: number; trend: number; label: string }> = []; + if (chartData.length >= 2 && forecast.growthRate !== 0) { + const firstTime = chartData[0].time; + const lastTime = chartData[chartData.length - 1].time; + const now = Date.now(); + // Extend forward: to ceiling or 2x window, whichever is sooner + const extendMs = settings.opsCeiling !== null && forecast.timeToLimitMs !== null && forecast.timeToLimitMs > 0 + ? Math.min(forecast.timeToLimitMs, settings.rollingWindowMs) + : settings.rollingWindowMs; + const endTime = now + extendMs; + + // Use regression to compute trend values + const slopePerMs = forecast.growthRate / 3_600_000; + const lastOps = chartData[chartData.length - 1].ops; + const intercept = lastOps - slopePerMs * lastTime; + + // Historical portion of trend + trendData.push({ time: firstTime, trend: slopePerMs * firstTime + intercept, label: formatTime(firstTime) }); + trendData.push({ time: lastTime, trend: slopePerMs * lastTime + intercept, label: formatTime(lastTime) }); + // Projected portion + if (endTime > lastTime) { + trendData.push({ time: endTime, trend: slopePerMs * endTime + intercept, label: formatTime(endTime) }); + } + } + + // Merge for common x-axis + const allTimes = new Set([...chartData.map((d) => d.time), ...trendData.map((d) => d.time)]); + const merged = [...allTimes] + .sort((a, b) => a - b) + .map((t) => { + const dataPoint = chartData.find((d) => d.time === t); + const trendPoint = trendData.find((d) => d.time === t); + return { + time: t, + label: formatTime(t), + ops: dataPoint?.ops ?? undefined, + trend: trendPoint?.trend ?? undefined, + }; + }); + + return ( + +

Ops/sec History

+ + + + + + [formatOps(Number(value)), '']} + labelFormatter={(label) => String(label)} + /> + + + {settings.opsCeiling !== null && ( + + )} + + +
+ ); +} diff --git a/apps/web/src/types/throughput.ts b/apps/web/src/types/throughput.ts new file mode 100644 index 00000000..7a4f3f26 --- /dev/null +++ b/apps/web/src/types/throughput.ts @@ -0,0 +1,32 @@ +export interface ThroughputSettings { + connectionId: string; + enabled: boolean; + opsCeiling: number | null; + rollingWindowMs: number; + alertThresholdMs: number; + updatedAt: number; +} + +export interface ThroughputForecast { + connectionId: string; + mode: 'trend' | 'forecast'; + currentOpsPerSec: number; + growthRate: number; + growthPercent: number; + trendDirection: 'rising' | 'falling' | 'stable'; + dataPointCount: number; + windowMs: number; + opsCeiling: number | null; + timeToLimitMs: number | null; + timeToLimitHuman: string; + enabled: boolean; + insufficientData: boolean; + insufficientDataMessage?: string; +} + +export interface ThroughputSettingsUpdate { + enabled?: boolean; + opsCeiling?: number | null; + rollingWindowMs?: number; + alertThresholdMs?: number; +} diff --git a/packages/shared/src/index.ts b/packages/shared/src/index.ts index 5a418089..61c9c5b9 100644 --- a/packages/shared/src/index.ts +++ b/packages/shared/src/index.ts @@ -13,3 +13,4 @@ export * from './utils/key-patterns'; export * from './license/index'; export * from './webhooks/index'; export * from './types/vector-index-snapshots'; +export * from './types/throughput.types'; diff --git a/packages/shared/src/types/settings.types.ts b/packages/shared/src/types/settings.types.ts index 3344cc40..91948a78 100644 --- a/packages/shared/src/types/settings.types.ts +++ b/packages/shared/src/types/settings.types.ts @@ -9,6 +9,10 @@ export interface AppSettings { anomalyCacheTtlMs: number; anomalyPrometheusIntervalMs: number; + throughputForecastingEnabled: boolean; + throughputForecastingDefaultRollingWindowMs: number; + throughputForecastingDefaultAlertThresholdMs: number; + updatedAt: number; createdAt: number; } diff --git a/packages/shared/src/types/throughput.types.ts 
b/packages/shared/src/types/throughput.types.ts new file mode 100644 index 00000000..7a4f3f26 --- /dev/null +++ b/packages/shared/src/types/throughput.types.ts @@ -0,0 +1,32 @@ +export interface ThroughputSettings { + connectionId: string; + enabled: boolean; + opsCeiling: number | null; + rollingWindowMs: number; + alertThresholdMs: number; + updatedAt: number; +} + +export interface ThroughputForecast { + connectionId: string; + mode: 'trend' | 'forecast'; + currentOpsPerSec: number; + growthRate: number; + growthPercent: number; + trendDirection: 'rising' | 'falling' | 'stable'; + dataPointCount: number; + windowMs: number; + opsCeiling: number | null; + timeToLimitMs: number | null; + timeToLimitHuman: string; + enabled: boolean; + insufficientData: boolean; + insufficientDataMessage?: string; +} + +export interface ThroughputSettingsUpdate { + enabled?: boolean; + opsCeiling?: number | null; + rollingWindowMs?: number; + alertThresholdMs?: number; +} diff --git a/packages/shared/src/webhooks/types.ts b/packages/shared/src/webhooks/types.ts index d84468fb..b3a45aeb 100644 --- a/packages/shared/src/webhooks/types.ts +++ b/packages/shared/src/webhooks/types.ts @@ -15,6 +15,7 @@ export enum WebhookEventType { CLUSTER_FAILOVER = 'cluster.failover', AUDIT_POLICY_VIOLATION = 'audit.policy.violation', COMPLIANCE_ALERT = 'compliance.alert', + THROUGHPUT_LIMIT = 'throughput.limit', } // Injection tokens for proprietary webhook services @@ -37,6 +38,7 @@ export const PRO_EVENTS: WebhookEventType[] = [ WebhookEventType.CLUSTER_FAILOVER, WebhookEventType.LATENCY_SPIKE, WebhookEventType.CONNECTION_SPIKE, + WebhookEventType.THROUGHPUT_LIMIT, ]; export const ENTERPRISE_EVENTS: WebhookEventType[] = [ @@ -73,6 +75,7 @@ export const WEBHOOK_EVENT_TIERS: Record = { [WebhookEventType.CLUSTER_FAILOVER]: Tier.pro, [WebhookEventType.LATENCY_SPIKE]: Tier.pro, [WebhookEventType.CONNECTION_SPIKE]: Tier.pro, + [WebhookEventType.THROUGHPUT_LIMIT]: Tier.pro, // Enterprise tier events 
[WebhookEventType.AUDIT_POLICY_VIOLATION]: Tier.enterprise, @@ -309,6 +312,17 @@ export interface IWebhookEventsProService { instance: WebhookInstanceInfo; connectionId?: string; }): Promise; + + dispatchThroughputLimit(data: { + currentOpsPerSec: number; + opsCeiling: number; + timeToLimitMs: number; + threshold: number; + growthRate: number; + timestamp: number; + instance: WebhookInstanceInfo; + connectionId?: string; + }): Promise; } /** diff --git a/proprietary/webhook-pro/__tests__/throughput-limit.spec.ts b/proprietary/webhook-pro/__tests__/throughput-limit.spec.ts new file mode 100644 index 00000000..f7c14f1c --- /dev/null +++ b/proprietary/webhook-pro/__tests__/throughput-limit.spec.ts @@ -0,0 +1,81 @@ +import { Test, TestingModule } from '@nestjs/testing'; +import { WebhookEventsProService } from '../webhook-events-pro.service'; +import { WebhookDispatcherService } from '@app/webhooks/webhook-dispatcher.service'; +import { WebhookEventType } from '@betterdb/shared'; +import { LicenseService } from '@proprietary/licenses'; + +describe('WebhookEventsProService - dispatchThroughputLimit', () => { + let service: WebhookEventsProService; + let webhookDispatcher: { dispatchThresholdAlert: jest.Mock }; + let licenseService: { getLicenseTier: jest.Mock }; + + const testData = { + currentOpsPerSec: 50_000, + opsCeiling: 80_000, + timeToLimitMs: 7_200_000, // 2 hours + threshold: 7_200_000, + growthRate: 10_000, + timestamp: Date.now(), + instance: { host: 'localhost', port: 6379 }, + connectionId: 'conn-42', + }; + + beforeEach(async () => { + webhookDispatcher = { + dispatchThresholdAlert: jest.fn().mockResolvedValue(undefined), + }; + licenseService = { + getLicenseTier: jest.fn().mockReturnValue('pro'), + }; + + const module: TestingModule = await Test.createTestingModule({ + providers: [ + WebhookEventsProService, + { provide: WebhookDispatcherService, useValue: webhookDispatcher }, + { provide: LicenseService, useValue: licenseService }, + ], + }).compile(); 
+ + service = module.get(WebhookEventsProService); + }); + + // ── Slice 11: Webhook Dispatch (Pro) ── + + it('11a: dispatches with correct parameters when Pro licensed', async () => { + await service.dispatchThroughputLimit(testData); + + expect(webhookDispatcher.dispatchThresholdAlert).toHaveBeenCalledTimes(1); + + const [eventType, alertKey, value, threshold, isAbove] = + webhookDispatcher.dispatchThresholdAlert.mock.calls[0]; + + expect(eventType).toBe(WebhookEventType.THROUGHPUT_LIMIT); + expect(isAbove).toBe(false); + expect(value).toBe(7_200_000); + expect(threshold).toBe(7_200_000); + }); + + it('11b: payload contains human-readable message', async () => { + await service.dispatchThroughputLimit(testData); + + const payload = webhookDispatcher.dispatchThresholdAlert.mock.calls[0][5]; + expect(payload.message).toContain('~2h'); + }); + + it('11c: alert key includes connectionId', async () => { + await service.dispatchThroughputLimit(testData); + + const alertKey = webhookDispatcher.dispatchThresholdAlert.mock.calls[0][1]; + expect(alertKey).toBe('throughput_limit:conn-42'); + }); + + // ── Slice 12: Webhook Skips (Community) ── + + it('12a: skips dispatch when Community tier', async () => { + licenseService.getLicenseTier.mockReturnValue('community'); + + await service.dispatchThroughputLimit(testData); + + expect(webhookDispatcher.dispatchThresholdAlert).not.toHaveBeenCalled(); + }); +}); diff --git a/proprietary/webhook-pro/webhook-events-pro.service.ts b/proprietary/webhook-pro/webhook-events-pro.service.ts index 26ea3770..64a60b9d 100644 --- a/proprietary/webhook-pro/webhook-events-pro.service.ts +++ b/proprietary/webhook-pro/webhook-events-pro.service.ts @@ -253,4 +253,42 @@ export class WebhookEventsProService implements OnModuleInit { data.connectionId ); } + + /** + * Dispatch throughput limit event (PRO+) + * Called when projected time-to-limit drops below configured threshold + */ + async dispatchThroughputLimit(data: { + currentOpsPerSec: number; 
+ opsCeiling: number; + timeToLimitMs: number; + threshold: number; + growthRate: number; + timestamp: number; + instance: { host: string; port: number }; + connectionId?: string; + }): Promise { + if (!this.isEnabled()) { + this.logger.debug('Throughput limit event skipped - requires PRO license'); + return; + } + + await this.webhookDispatcher.dispatchThresholdAlert( + WebhookEventType.THROUGHPUT_LIMIT, + `throughput_limit:${data.connectionId || 'default'}`, + data.timeToLimitMs, + data.threshold, + false, // isAbove = false: fire when timeToLimit drops BELOW threshold + { + currentOpsPerSec: data.currentOpsPerSec, + opsCeiling: data.opsCeiling, + timeToLimitMs: data.timeToLimitMs, + growthRate: data.growthRate, + message: `Ops/sec projected to reach ceiling (${data.opsCeiling}) in ~${Math.round(data.timeToLimitMs / 3_600_000)}h`, + timestamp: data.timestamp, + instance: data.instance, + }, + data.connectionId, + ); + } } From 5d8dde4e3b7e2d232c98501e0a0506a921477025 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Fri, 27 Mar 2026 15:17:25 +0200 Subject: [PATCH 02/20] Refactor throughput forecasting settings to use a reusable section component --- ...hroughput-forecasting-settings-section.tsx | 64 ++++++++++++++++ apps/web/src/components/ui/toggle.tsx | 30 ++++++++ apps/web/src/pages/Settings.tsx | 74 ++++++------------- 3 files changed, 116 insertions(+), 52 deletions(-) create mode 100644 apps/web/src/components/pages/settings/throughput-forecasting-settings-section.tsx create mode 100644 apps/web/src/components/ui/toggle.tsx diff --git a/apps/web/src/components/pages/settings/throughput-forecasting-settings-section.tsx b/apps/web/src/components/pages/settings/throughput-forecasting-settings-section.tsx new file mode 100644 index 00000000..b2526294 --- /dev/null +++ b/apps/web/src/components/pages/settings/throughput-forecasting-settings-section.tsx @@ -0,0 +1,64 @@ +import { Toggle } from '../../ui/toggle'; + +interface ThroughputForecastingSettingsSectionProps { 
+ throughputForecastingEnabled: boolean; + throughputForecastingDefaultRollingWindowMs: number; + throughputForecastingDefaultAlertThresholdMs: number; + onToggleEnabled: () => void; + onRollingWindowChange: (value: number) => void; + onAlertThresholdChange: (value: number) => void; +} + +export function ThroughputForecastingSettingsSection({ + throughputForecastingEnabled, + throughputForecastingDefaultRollingWindowMs, + throughputForecastingDefaultAlertThresholdMs, + onToggleEnabled, + onRollingWindowChange, + onAlertThresholdChange, +}: ThroughputForecastingSettingsSectionProps) { + return ( +
+

Throughput Forecasting

+

+ These defaults are applied when throughput forecasting is first activated for a connection. + Per-connection settings can be customized on the Throughput Forecast page. +

+ +
+
+ + +
+
+ + +
+ +
+ + +
+
+
+ ); +} diff --git a/apps/web/src/components/ui/toggle.tsx b/apps/web/src/components/ui/toggle.tsx new file mode 100644 index 00000000..88cfa6df --- /dev/null +++ b/apps/web/src/components/ui/toggle.tsx @@ -0,0 +1,30 @@ +import { cn } from '../../lib/utils'; + +interface ToggleProps { + checked: boolean; + onChange: () => void; + className?: string; +} + +export function Toggle({ checked, onChange, className }: ToggleProps) { + return ( + + ); +} diff --git a/apps/web/src/pages/Settings.tsx b/apps/web/src/pages/Settings.tsx index 7a3d07a7..6c791bdc 100644 --- a/apps/web/src/pages/Settings.tsx +++ b/apps/web/src/pages/Settings.tsx @@ -5,6 +5,7 @@ import { useConnection } from '../hooks/useConnection'; import { AppSettings, SettingsUpdateRequest } from '@betterdb/shared'; import { Card } from '../components/ui/card'; import { Badge } from '../components/ui/badge'; +import { ThroughputForecastingSettingsSection } from '../components/pages/settings/throughput-forecasting-settings-section'; type SettingsCategory = 'audit' | 'clientAnalytics' | 'anomaly' | 'throughputForecasting' | 'mcpTokens'; @@ -280,58 +281,27 @@ export function Settings({ isCloudMode = false }: { isCloudMode?: boolean }) { )} {activeCategory === 'throughputForecasting' && ( -
-

Throughput Forecasting

-

- These defaults are applied when throughput forecasting is first activated for a connection. - Per-connection settings can be customized on the Throughput Forecast page. -

- -
- - -
- -
- - -
- -
- - -
-
+ + handleInputChange( + 'throughputForecastingEnabled', + !formData.throughputForecastingEnabled, + ) + } + onRollingWindowChange={(value) => + handleInputChange('throughputForecastingDefaultRollingWindowMs', value) + } + onAlertThresholdChange={(value) => + handleInputChange('throughputForecastingDefaultAlertThresholdMs', value) + } + /> )} {activeCategory === 'mcpTokens' && ( From 36f392f5d06ab4661eb67bb4b91ee7befea9bdd0 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Fri, 27 Mar 2026 15:41:41 +0200 Subject: [PATCH 03/20] Modularize throughput forecasting components and utilities Split throughput forecasting page into reusable components (`ForecastCard`, `ThroughputChart`, `SettingsPanel`, etc.) and utility functions to improve code organization and maintainability. Updated imports to use shared types from `@betterdb/shared`. --- apps/web/src/api/metrics.ts | 2 +- .../pages/throughput-forecasting/Disabled.tsx | 33 ++ .../throughput-forecasting/ForecastCard.tsx | 51 +++ .../InsufficientData.tsx | 12 + .../pages/throughput-forecasting/Loading.tsx | 8 + .../throughput-forecasting/SettingsPanel.tsx | 72 ++++ .../ThroughputChart.tsx | 141 ++++++++ .../pages/throughput-forecasting/index.ts | 7 + .../pages/throughput-forecasting/utils.ts | 24 ++ apps/web/src/hooks/usePolling.ts | 13 +- apps/web/src/pages/ThroughputForecasting.tsx | 323 ++---------------- 11 files changed, 396 insertions(+), 290 deletions(-) create mode 100644 apps/web/src/components/pages/throughput-forecasting/Disabled.tsx create mode 100644 apps/web/src/components/pages/throughput-forecasting/ForecastCard.tsx create mode 100644 apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx create mode 100644 apps/web/src/components/pages/throughput-forecasting/Loading.tsx create mode 100644 apps/web/src/components/pages/throughput-forecasting/SettingsPanel.tsx create mode 100644 apps/web/src/components/pages/throughput-forecasting/ThroughputChart.tsx create mode 100644 
apps/web/src/components/pages/throughput-forecasting/index.ts create mode 100644 apps/web/src/components/pages/throughput-forecasting/utils.ts diff --git a/apps/web/src/api/metrics.ts b/apps/web/src/api/metrics.ts index bdcff1f2..3f8dd400 100644 --- a/apps/web/src/api/metrics.ts +++ b/apps/web/src/api/metrics.ts @@ -1,5 +1,5 @@ import { fetchApi } from './client'; -import type { ThroughputForecast, ThroughputSettings, ThroughputSettingsUpdate } from '../types/throughput'; +import type { ThroughputForecast, ThroughputSettings, ThroughputSettingsUpdate } from '@betterdb/shared'; import type { HealthResponse, InfoResponse, diff --git a/apps/web/src/components/pages/throughput-forecasting/Disabled.tsx b/apps/web/src/components/pages/throughput-forecasting/Disabled.tsx new file mode 100644 index 00000000..8de97b74 --- /dev/null +++ b/apps/web/src/components/pages/throughput-forecasting/Disabled.tsx @@ -0,0 +1,33 @@ +import { Card } from '../../ui/card.tsx'; +import { SettingsPanel } from './SettingsPanel.tsx'; +import { ThroughputSettings, ThroughputSettingsUpdate } from '@betterdb/shared'; + +export const Disabled = ({ + updateSetting, + settings, + saveStatus, +}: { + updateSetting: (updates: ThroughputSettingsUpdate) => void; + settings: ThroughputSettings; + saveStatus: 'idle' | 'saved' | 'error'; +}) => { + return ( +
+

Throughput Forecast

+ +
+

+ Throughput forecasting is disabled for this connection. +

+ +
+
+ +
+ ); +}; diff --git a/apps/web/src/components/pages/throughput-forecasting/ForecastCard.tsx b/apps/web/src/components/pages/throughput-forecasting/ForecastCard.tsx new file mode 100644 index 00000000..11f03cfc --- /dev/null +++ b/apps/web/src/components/pages/throughput-forecasting/ForecastCard.tsx @@ -0,0 +1,51 @@ +import type { ThroughputForecast } from '@betterdb/shared'; +import { Card } from '../../ui/card.tsx'; +import { formatOps } from './utils.ts'; + +export function ForecastCard({ forecast }: { forecast: ThroughputForecast }) { + const directionArrow = + forecast.trendDirection === 'rising' + ? '\u2197' + : forecast.trendDirection === 'falling' + ? '\u2198' + : '\u2192'; + + return ( + +

+ {forecast.mode === 'forecast' ? 'Throughput Forecast' : 'Throughput Trend'} +

+ + {forecast.mode === 'forecast' && forecast.timeToLimitHuman && ( +

{forecast.timeToLimitHuman}

+ )} + +
+
+

Current

+

{formatOps(forecast.currentOpsPerSec)} ops/sec

+
+ {forecast.opsCeiling && ( +
+

Ceiling

+

{formatOps(forecast.opsCeiling)} ops/sec

+
+ )} +
+

Growth Rate

+

+ {forecast.growthRate >= 0 ? '+' : ''} + {formatOps(Math.round(forecast.growthRate))}/hr +

+
+
+

Trend

+

+ {directionArrow} {forecast.trendDirection} ({forecast.growthPercent >= 0 ? '+' : ''} + {forecast.growthPercent.toFixed(1)}%) +

+
+
+
+ ); +} diff --git a/apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx b/apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx new file mode 100644 index 00000000..f1ddef76 --- /dev/null +++ b/apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx @@ -0,0 +1,12 @@ +import { Card } from '../../ui/card.tsx'; +import { formatOps } from './utils.ts'; +import { ThroughputForecast } from '@betterdb/shared'; + +export const InsufficientData = ({ forecast }: { forecast: ThroughputForecast }) => ( + +

{forecast.insufficientDataMessage}

+ {forecast.currentOpsPerSec > 0 && ( +

{formatOps(forecast.currentOpsPerSec)} ops/sec

+ )} +
+); diff --git a/apps/web/src/components/pages/throughput-forecasting/Loading.tsx b/apps/web/src/components/pages/throughput-forecasting/Loading.tsx new file mode 100644 index 00000000..ce55bc7b --- /dev/null +++ b/apps/web/src/components/pages/throughput-forecasting/Loading.tsx @@ -0,0 +1,8 @@ +export const Loading = () => { + return ( +
+

Throughput Forecast

+

Loading...

+
+ ); +}; diff --git a/apps/web/src/components/pages/throughput-forecasting/SettingsPanel.tsx b/apps/web/src/components/pages/throughput-forecasting/SettingsPanel.tsx new file mode 100644 index 00000000..b0c8dd9c --- /dev/null +++ b/apps/web/src/components/pages/throughput-forecasting/SettingsPanel.tsx @@ -0,0 +1,72 @@ +import type { ThroughputSettings, ThroughputSettingsUpdate } from '@betterdb/shared'; +import { Card } from '../../ui/card.tsx'; +import { ALERT_PRESETS, WINDOW_PRESETS } from './utils.ts'; + +export function SettingsPanel({ + settings, + onUpdate, + saveStatus, +}: { + settings: ThroughputSettings; + onUpdate: (u: ThroughputSettingsUpdate) => void; + saveStatus: 'idle' | 'saved' | 'error'; +}) { + return ( + +
+

Settings

+
+ {saveStatus === 'saved' && Saved} + {saveStatus === 'error' && Error saving} +
+
+
+
+ + +
+ +
+ + + onUpdate({ opsCeiling: e.target.value ? parseInt(e.target.value) : null }) + } + className="w-full px-3 py-2 border rounded-md" + /> +
+ +
+ + +
+
+
+ ); +} diff --git a/apps/web/src/components/pages/throughput-forecasting/ThroughputChart.tsx b/apps/web/src/components/pages/throughput-forecasting/ThroughputChart.tsx new file mode 100644 index 00000000..97fbf82d --- /dev/null +++ b/apps/web/src/components/pages/throughput-forecasting/ThroughputChart.tsx @@ -0,0 +1,141 @@ +import { ThroughputForecast, ThroughputSettings } from '@betterdb/shared'; +import { useMemo } from 'react'; +import { formatOps, formatTime } from './utils.ts'; +import { Card } from '../../ui/card.tsx'; +import { + CartesianGrid, + Line, + LineChart, + ReferenceLine, + ResponsiveContainer, + Tooltip, + XAxis, + YAxis, +} from 'recharts'; + +export function ThroughputChart({ + chartData, + forecast, + settings, +}: { + chartData: Array<{ time: number; ops: number; label: string }>; + forecast: ThroughputForecast; + settings: ThroughputSettings; +}) { + const merged = useMemo(() => { + // Build trend line data + const trendData: Array<{ time: number; trend: number; label: string }> = []; + if (chartData.length >= 2 && forecast.growthRate !== 0) { + const firstTime = chartData[0].time; + const lastTime = chartData[chartData.length - 1].time; + const now = Date.now(); + // Extend forward: to ceiling or 2x window, whichever is sooner + const extendMs = + settings.opsCeiling !== null && + forecast.timeToLimitMs !== null && + forecast.timeToLimitMs > 0 + ? 
Math.min(forecast.timeToLimitMs, settings.rollingWindowMs) + : settings.rollingWindowMs; + const endTime = now + extendMs; + + // Use regression to compute trend values + const slopePerMs = forecast.growthRate / 3_600_000; + const lastOps = chartData[chartData.length - 1].ops; + const intercept = lastOps - slopePerMs * lastTime; + + // Historical portion of trend + trendData.push({ + time: firstTime, + trend: slopePerMs * firstTime + intercept, + label: formatTime(firstTime), + }); + trendData.push({ + time: lastTime, + trend: slopePerMs * lastTime + intercept, + label: formatTime(lastTime), + }); + // Projected portion + if (endTime > lastTime) { + trendData.push({ + time: endTime, + trend: slopePerMs * endTime + intercept, + label: formatTime(endTime), + }); + } + } + + // Merge for common x-axis + const allTimes = new Set([...chartData.map((d) => d.time), ...trendData.map((d) => d.time)]); + return [...allTimes] + .sort((a, b) => a - b) + .map((t) => { + const dataPoint = chartData.find((d) => d.time === t); + const trendPoint = trendData.find((d) => d.time === t); + return { + time: t, + label: formatTime(t), + ops: dataPoint?.ops ?? undefined, + trend: trendPoint?.trend ?? undefined, + }; + }); + }, [ + chartData, + forecast.growthRate, + forecast.timeToLimitMs, + settings.opsCeiling, + settings.rollingWindowMs, + ]); + + if (chartData.length === 0) { + return null; + } + + return ( + +

Ops/sec History

+ + + + + + [formatOps(Number(value)), '']} + labelFormatter={(label) => String(label)} + /> + + + {settings.opsCeiling !== null && ( + + )} + + +
+ ); +} diff --git a/apps/web/src/components/pages/throughput-forecasting/index.ts b/apps/web/src/components/pages/throughput-forecasting/index.ts new file mode 100644 index 00000000..9452f783 --- /dev/null +++ b/apps/web/src/components/pages/throughput-forecasting/index.ts @@ -0,0 +1,7 @@ +export * from './ThroughputChart'; +export * from './SettingsPanel'; +export * from './Loading'; +export * from './ForecastCard'; +export * from './InsufficientData'; +export * from './Disabled'; +export * from './utils'; diff --git a/apps/web/src/components/pages/throughput-forecasting/utils.ts b/apps/web/src/components/pages/throughput-forecasting/utils.ts new file mode 100644 index 00000000..9109c26b --- /dev/null +++ b/apps/web/src/components/pages/throughput-forecasting/utils.ts @@ -0,0 +1,24 @@ +export const WINDOW_PRESETS = [ + { label: '1h', value: 3600000 }, + { label: '3h', value: 10800000 }, + { label: '6h', value: 21600000 }, + { label: '12h', value: 43200000 }, + { label: '24h', value: 86400000 }, +]; + +export const ALERT_PRESETS = [ + { label: '30m', value: 1800000 }, + { label: '1h', value: 3600000 }, + { label: '2h', value: 7200000 }, + { label: '4h', value: 14400000 }, +]; + +export function formatOps(value: number): string { + if (value >= 1_000_000) return `${(value / 1_000_000).toFixed(1)}M`; + if (value >= 1_000) return `${(value / 1_000).toFixed(1)}K`; + return value.toString(); +} + +export function formatTime(timestamp: number): string { + return new Date(timestamp).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' }); +} diff --git a/apps/web/src/hooks/usePolling.ts b/apps/web/src/hooks/usePolling.ts index 9286c477..c58abeb4 100644 --- a/apps/web/src/hooks/usePolling.ts +++ b/apps/web/src/hooks/usePolling.ts @@ -10,7 +10,12 @@ interface UsePollingOptions { refetchKey?: string | number; } -export function usePolling({ fetcher, interval = 5000, enabled = true, refetchKey }: UsePollingOptions) { +export function usePolling({ + fetcher, + interval 
= 5000, + enabled = true, + refetchKey, +}: UsePollingOptions) { const [data, setData] = useState(null); const [error, setError] = useState(null); const [loading, setLoading] = useState(true); @@ -38,7 +43,9 @@ export function usePolling({ fetcher, interval = 5000, enabled = true, refetc try { setError(null); - const result = await (fetcherRef.current as (signal?: AbortSignal) => Promise)(abortController.signal); + const result = await (fetcherRef.current as (signal?: AbortSignal) => Promise)( + abortController.signal, + ); if (!abortController.signal.aborted) { setData(result); @@ -62,7 +69,7 @@ export function usePolling({ fetcher, interval = 5000, enabled = true, refetc } }; - refresh(); + void refresh(); const timer = setInterval(refresh, interval); return () => { diff --git a/apps/web/src/pages/ThroughputForecasting.tsx b/apps/web/src/pages/ThroughputForecasting.tsx index e8b4568b..5eef7850 100644 --- a/apps/web/src/pages/ThroughputForecasting.tsx +++ b/apps/web/src/pages/ThroughputForecasting.tsx @@ -1,44 +1,21 @@ -import { useState, useEffect, useCallback, useRef } from 'react'; +import { useCallback, useEffect, useRef, useState } from 'react'; import { useConnection } from '../hooks/useConnection'; import { usePolling } from '../hooks/usePolling'; import { metricsApi } from '../api/metrics'; -import { Card } from '../components/ui/card'; -import type { ThroughputForecast, ThroughputSettings, ThroughputSettingsUpdate } from '../types/throughput'; +import type { + ThroughputForecast, + ThroughputSettings, + ThroughputSettingsUpdate, +} from '@betterdb/shared'; import { - LineChart, - Line, - XAxis, - YAxis, - Tooltip, - ReferenceLine, - ResponsiveContainer, - CartesianGrid, -} from 'recharts'; - -const WINDOW_PRESETS = [ - { label: '1h', value: 3600000 }, - { label: '3h', value: 10800000 }, - { label: '6h', value: 21600000 }, - { label: '12h', value: 43200000 }, - { label: '24h', value: 86400000 }, -]; - -const ALERT_PRESETS = [ - { label: '30m', value: 
1800000 }, - { label: '1h', value: 3600000 }, - { label: '2h', value: 7200000 }, - { label: '4h', value: 14400000 }, -]; - -function formatOps(value: number): string { - if (value >= 1_000_000) return `${(value / 1_000_000).toFixed(1)}M`; - if (value >= 1_000) return `${(value / 1_000).toFixed(1)}K`; - return value.toString(); -} - -function formatTime(timestamp: number): string { - return new Date(timestamp).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' }); -} + ForecastCard, + formatTime, + Loading, + SettingsPanel, + ThroughputChart, + Disabled, + InsufficientData, +} from '../components/pages/throughput-forecasting'; export function ThroughputForecasting() { const { currentConnection } = useConnection(); @@ -56,11 +33,16 @@ export function ThroughputForecasting() { // Load settings useEffect(() => { - metricsApi.getThroughputSettings().then(setSettings).catch(() => {}); - }, [currentConnection?.id]); // eslint-disable-line react-hooks/exhaustive-deps + metricsApi + .getThroughputSettings() + .then(setSettings) + .catch(() => {}); + }, [currentConnection?.id]); // Chart data - const [chartData, setChartData] = useState>([]); + const [chartData, setChartData] = useState>( + [], + ); useEffect(() => { if (!settings) return; const now = Date.now(); @@ -68,22 +50,30 @@ export function ThroughputForecasting() { .getStoredMemorySnapshots({ startTime: now - settings.rollingWindowMs, limit: 1500 }) .then((snapshots) => { const sorted = [...snapshots].sort((a, b) => a.timestamp - b.timestamp); - setChartData(sorted.map((s) => ({ time: s.timestamp, ops: s.opsPerSec, label: formatTime(s.timestamp) }))); + setChartData( + sorted.map((s) => ({ + time: s.timestamp, + ops: s.opsPerSec, + label: formatTime(s.timestamp), + })), + ); }) .catch(() => {}); - }, [settings?.rollingWindowMs, currentConnection?.id, forecast]); + }, [currentConnection?.id, forecast, settings]); const updateSetting = useCallback( (updates: ThroughputSettingsUpdate) => { if 
(debounceTimeout.current) clearTimeout(debounceTimeout.current); - setSettings((prev: ThroughputSettings | null) => (prev ? { ...prev, ...updates, updatedAt: Date.now() } : prev)); + setSettings((prev: ThroughputSettings | null) => + prev ? { ...prev, ...updates, updatedAt: Date.now() } : prev, + ); debounceTimeout.current = setTimeout(async () => { try { const updated = await metricsApi.updateThroughputSettings(updates); setSettings(updated); setSaveStatus('saved'); - refreshForecast(); + void refreshForecast(); if (saveTimeout.current) clearTimeout(saveTimeout.current); saveTimeout.current = setTimeout(() => setSaveStatus('idle'), 2000); } catch { @@ -97,32 +87,11 @@ export function ThroughputForecasting() { // ── Page States ── if (!forecast || !settings) { - return ( -
-

Throughput Forecast

-

Loading...

-
- ); + return ; } if (!forecast.enabled) { - return ( -
-

Throughput Forecast

- -
-

Throughput forecasting is disabled for this connection.

- -
-
- -
- ); + return ; } return ( @@ -132,12 +101,7 @@ export function ThroughputForecasting() { {forecast.insufficientData ? ( - -

{forecast.insufficientDataMessage}

- {forecast.currentOpsPerSec > 0 && ( -

{formatOps(forecast.currentOpsPerSec)} ops/sec

- )} -
+ ) : ( <> @@ -147,216 +111,3 @@ export function ThroughputForecasting() {
); } - -// ── Settings Panel ── - -function SettingsPanel({ - settings, - onUpdate, - saveStatus, -}: { - settings: ThroughputSettings; - onUpdate: (u: ThroughputSettingsUpdate) => void; - saveStatus: 'idle' | 'saved' | 'error'; -}) { - return ( - -
-

Settings

-
- {saveStatus === 'saved' && Saved} - {saveStatus === 'error' && Error saving} -
-
-
-
- - -
- -
- - onUpdate({ opsCeiling: e.target.value ? parseInt(e.target.value) : null })} - className="w-full px-3 py-2 border rounded-md" - /> -
- -
- - -
-
-
- ); -} - -// ── Forecast Card ── - -function ForecastCard({ forecast }: { forecast: ThroughputForecast }) { - const directionArrow = forecast.trendDirection === 'rising' ? '\u2197' : forecast.trendDirection === 'falling' ? '\u2198' : '\u2192'; - - return ( - -

- {forecast.mode === 'forecast' ? 'Throughput Forecast' : 'Throughput Trend'} -

- - {forecast.mode === 'forecast' && forecast.timeToLimitHuman && ( -

{forecast.timeToLimitHuman}

- )} - -
-
-

Current

-

{formatOps(forecast.currentOpsPerSec)} ops/sec

-
- {forecast.opsCeiling && ( -
-

Ceiling

-

{formatOps(forecast.opsCeiling)} ops/sec

-
- )} -
-

Growth Rate

-

{forecast.growthRate >= 0 ? '+' : ''}{formatOps(Math.round(forecast.growthRate))}/hr

-
-
-

Trend

-

- {directionArrow} {forecast.trendDirection} ({forecast.growthPercent >= 0 ? '+' : ''}{forecast.growthPercent.toFixed(1)}%) -

-
-
-
- ); -} - -// ── Chart ── - -function ThroughputChart({ - chartData, - forecast, - settings, -}: { - chartData: Array<{ time: number; ops: number; label: string }>; - forecast: ThroughputForecast; - settings: ThroughputSettings; -}) { - if (chartData.length === 0) return null; - - // Build trend line data - const trendData: Array<{ time: number; trend: number; label: string }> = []; - if (chartData.length >= 2 && forecast.growthRate !== 0) { - const firstTime = chartData[0].time; - const lastTime = chartData[chartData.length - 1].time; - const now = Date.now(); - // Extend forward: to ceiling or 2x window, whichever is sooner - const extendMs = settings.opsCeiling !== null && forecast.timeToLimitMs !== null && forecast.timeToLimitMs > 0 - ? Math.min(forecast.timeToLimitMs, settings.rollingWindowMs) - : settings.rollingWindowMs; - const endTime = now + extendMs; - - // Use regression to compute trend values - const slopePerMs = forecast.growthRate / 3_600_000; - const lastOps = chartData[chartData.length - 1].ops; - const intercept = lastOps - slopePerMs * lastTime; - - // Historical portion of trend - trendData.push({ time: firstTime, trend: slopePerMs * firstTime + intercept, label: formatTime(firstTime) }); - trendData.push({ time: lastTime, trend: slopePerMs * lastTime + intercept, label: formatTime(lastTime) }); - // Projected portion - if (endTime > lastTime) { - trendData.push({ time: endTime, trend: slopePerMs * endTime + intercept, label: formatTime(endTime) }); - } - } - - // Merge for common x-axis - const allTimes = new Set([...chartData.map((d) => d.time), ...trendData.map((d) => d.time)]); - const merged = [...allTimes] - .sort((a, b) => a - b) - .map((t) => { - const dataPoint = chartData.find((d) => d.time === t); - const trendPoint = trendData.find((d) => d.time === t); - return { - time: t, - label: formatTime(t), - ops: dataPoint?.ops ?? undefined, - trend: trendPoint?.trend ?? undefined, - }; - }); - - return ( - -

Ops/sec History

- - - - - - [formatOps(Number(value)), '']} - labelFormatter={(label) => String(label)} - /> - - - {settings.opsCeiling !== null && ( - - )} - - -
- ); -} From 5cb015ed0ac145d7a0e670abedf1108b2d06e217 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Fri, 27 Mar 2026 15:44:31 +0200 Subject: [PATCH 04/20] Update SettingsPanel heading and adjust component placement in ThroughputForecasting page --- .../components/pages/throughput-forecasting/SettingsPanel.tsx | 2 +- apps/web/src/pages/ThroughputForecasting.tsx | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/apps/web/src/components/pages/throughput-forecasting/SettingsPanel.tsx b/apps/web/src/components/pages/throughput-forecasting/SettingsPanel.tsx index b0c8dd9c..03e0577c 100644 --- a/apps/web/src/components/pages/throughput-forecasting/SettingsPanel.tsx +++ b/apps/web/src/components/pages/throughput-forecasting/SettingsPanel.tsx @@ -14,7 +14,7 @@ export function SettingsPanel({ return (
-

Settings

+

Instance Settings

{saveStatus === 'saved' && Saved} {saveStatus === 'error' && Error saving} diff --git a/apps/web/src/pages/ThroughputForecasting.tsx b/apps/web/src/pages/ThroughputForecasting.tsx index 5eef7850..acf4cc03 100644 --- a/apps/web/src/pages/ThroughputForecasting.tsx +++ b/apps/web/src/pages/ThroughputForecasting.tsx @@ -98,8 +98,6 @@ export function ThroughputForecasting() {

Throughput Forecast

- - {forecast.insufficientData ? ( ) : ( @@ -108,6 +106,7 @@ export function ThroughputForecasting() { )} +
); } From 6b2c15ef2256cb5699772ae0fba680110ad72bae Mon Sep 17 00:00:00 2001 From: jamby77 Date: Fri, 27 Mar 2026 15:45:27 +0200 Subject: [PATCH 05/20] Update SettingsPanel heading and adjust component placement in ThroughputForecasting page --- apps/web/src/types/throughput.ts | 32 -------------------------------- 1 file changed, 32 deletions(-) delete mode 100644 apps/web/src/types/throughput.ts diff --git a/apps/web/src/types/throughput.ts b/apps/web/src/types/throughput.ts deleted file mode 100644 index 7a4f3f26..00000000 --- a/apps/web/src/types/throughput.ts +++ /dev/null @@ -1,32 +0,0 @@ -export interface ThroughputSettings { - connectionId: string; - enabled: boolean; - opsCeiling: number | null; - rollingWindowMs: number; - alertThresholdMs: number; - updatedAt: number; -} - -export interface ThroughputForecast { - connectionId: string; - mode: 'trend' | 'forecast'; - currentOpsPerSec: number; - growthRate: number; - growthPercent: number; - trendDirection: 'rising' | 'falling' | 'stable'; - dataPointCount: number; - windowMs: number; - opsCeiling: number | null; - timeToLimitMs: number | null; - timeToLimitHuman: string; - enabled: boolean; - insufficientData: boolean; - insufficientDataMessage?: string; -} - -export interface ThroughputSettingsUpdate { - enabled?: boolean; - opsCeiling?: number | null; - rollingWindowMs?: number; - alertThresholdMs?: number; -} From 72f69856bedc6999d921a14e2da13250e2134657 Mon Sep 17 00:00:00 2001 From: Petar Dzhambazov Date: Tue, 31 Mar 2026 14:35:47 +0300 Subject: [PATCH 06/20] Migrate forecasting hooks to TanStack Query (#70) Replaced custom polling hooks with `react-query` for data fetching, caching, and synchronization in throughput forecasting workflows. Adjusted API calls, updated components to use `react-query` hooks, and removed redundant `usePolling` implementation. Enhanced error handling and refetch mechanisms for improved user experience. 
Extract data-fetching logic from page components into dedicated hooks using @tanstack/react-query for caching, deduplication, and polling. Add Vitest test infrastructure and unit tests for all new hooks. --- .env.example | 2 +- .../src/storage/adapters/sqlite.adapter.ts | 1 + .../throughput-forecasting.service.ts | 2 +- apps/web/package.json | 8 +- .../InsufficientData.tsx | 8 +- apps/web/src/hooks/useBaselineHotKeys.test.ts | 77 ++++++ apps/web/src/hooks/useBaselineHotKeys.ts | 30 +++ apps/web/src/hooks/useLatencyDoctor.test.ts | 40 +++ apps/web/src/hooks/useLatencyDoctor.ts | 12 + apps/web/src/hooks/useLatencyHistory.test.ts | 37 +++ apps/web/src/hooks/useLatencyHistory.ts | 10 + apps/web/src/hooks/useLicense.test.ts | 53 ++++ apps/web/src/hooks/useLicense.ts | 24 +- apps/web/src/hooks/useMcpTokens.test.ts | 50 ++++ apps/web/src/hooks/useMcpTokens.ts | 26 ++ apps/web/src/hooks/usePolling.ts | 103 +++----- .../web/src/hooks/useStoredCommandLog.test.ts | 62 +++++ apps/web/src/hooks/useStoredCommandLog.ts | 44 ++++ .../hooks/useStoredCommandLogPatterns.test.ts | 39 +++ .../src/hooks/useStoredCommandLogPatterns.ts | 26 ++ apps/web/src/hooks/useStoredLatency.test.ts | 75 ++++++ apps/web/src/hooks/useStoredLatency.ts | 38 +++ .../hooks/useStoredMemorySnapshots.test.ts | 66 +++++ .../web/src/hooks/useStoredMemorySnapshots.ts | 24 ++ apps/web/src/hooks/useStoredSlowLog.test.ts | 32 +++ apps/web/src/hooks/useStoredSlowLog.ts | 22 ++ apps/web/src/hooks/useVersionCheck.test.ts | 112 +++++++++ apps/web/src/hooks/useVersionCheck.ts | 103 +++----- apps/web/src/main.tsx | 15 +- apps/web/src/pages/Dashboard.tsx | 24 +- apps/web/src/pages/KeyAnalytics.tsx | 28 +-- apps/web/src/pages/Latency.tsx | 73 ++---- apps/web/src/pages/Settings.tsx | 28 +-- apps/web/src/pages/SlowLog.tsx | 124 +++------ apps/web/src/pages/ThroughputForecasting.tsx | 114 ++++----- apps/web/src/test/setup.ts | 1 + apps/web/src/test/test-utils.tsx | 42 ++++ apps/web/vitest.config.ts | 18 ++ pnpm-lock.yaml | 
216 +++++++++++++++- scripts/demo/throughput-ramp.sh | 236 ++++++++++++++++++ 40 files changed, 1617 insertions(+), 428 deletions(-) create mode 100644 apps/web/src/hooks/useBaselineHotKeys.test.ts create mode 100644 apps/web/src/hooks/useBaselineHotKeys.ts create mode 100644 apps/web/src/hooks/useLatencyDoctor.test.ts create mode 100644 apps/web/src/hooks/useLatencyDoctor.ts create mode 100644 apps/web/src/hooks/useLatencyHistory.test.ts create mode 100644 apps/web/src/hooks/useLatencyHistory.ts create mode 100644 apps/web/src/hooks/useLicense.test.ts create mode 100644 apps/web/src/hooks/useMcpTokens.test.ts create mode 100644 apps/web/src/hooks/useMcpTokens.ts create mode 100644 apps/web/src/hooks/useStoredCommandLog.test.ts create mode 100644 apps/web/src/hooks/useStoredCommandLog.ts create mode 100644 apps/web/src/hooks/useStoredCommandLogPatterns.test.ts create mode 100644 apps/web/src/hooks/useStoredCommandLogPatterns.ts create mode 100644 apps/web/src/hooks/useStoredLatency.test.ts create mode 100644 apps/web/src/hooks/useStoredLatency.ts create mode 100644 apps/web/src/hooks/useStoredMemorySnapshots.test.ts create mode 100644 apps/web/src/hooks/useStoredMemorySnapshots.ts create mode 100644 apps/web/src/hooks/useStoredSlowLog.test.ts create mode 100644 apps/web/src/hooks/useStoredSlowLog.ts create mode 100644 apps/web/src/hooks/useVersionCheck.test.ts create mode 100644 apps/web/src/test/setup.ts create mode 100644 apps/web/src/test/test-utils.tsx create mode 100644 apps/web/vitest.config.ts create mode 100755 scripts/demo/throughput-ramp.sh diff --git a/.env.example b/.env.example index 0573b60c..8ff17beb 100644 --- a/.env.example +++ b/.env.example @@ -6,7 +6,7 @@ DB_TYPE=auto # Storage Configuration STORAGE_TYPE=sqlite -STORAGE_SQLITE_PATH=./data/audit.db +STORAGE_SQLITE_FILEPATH=./data/audit.db # Audit Trail Configuration AUDIT_POLL_INTERVAL_MS=60000 diff --git a/apps/api/src/storage/adapters/sqlite.adapter.ts 
b/apps/api/src/storage/adapters/sqlite.adapter.ts index 91ce3b4b..a6b9e6fe 100644 --- a/apps/api/src/storage/adapters/sqlite.adapter.ts +++ b/apps/api/src/storage/adapters/sqlite.adapter.ts @@ -38,6 +38,7 @@ import { DatabaseConnectionConfig, HotKeyEntry, HotKeyQueryOptions, + StoredLatencyHistogram, } from '../../common/interfaces/storage-port.interface'; import type { VectorIndexSnapshot, VectorIndexSnapshotQueryOptions } from '@betterdb/shared'; import { SqliteDialect, RowMappers } from './base-sql.adapter'; diff --git a/apps/api/src/throughput-forecasting/throughput-forecasting.service.ts b/apps/api/src/throughput-forecasting/throughput-forecasting.service.ts index de6d4aad..1e4b79e0 100644 --- a/apps/api/src/throughput-forecasting/throughput-forecasting.service.ts +++ b/apps/api/src/throughput-forecasting/throughput-forecasting.service.ts @@ -299,7 +299,7 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr enabled: true, insufficientData: true, insufficientDataMessage: - 'Need at least 30 minutes of monitoring history. Data will be available shortly.', + 'Data will be available shortly. 
At least 30 minutes of monitoring history required.', }; } diff --git a/apps/web/package.json b/apps/web/package.json index 1a9d3e2c..e1497f14 100644 --- a/apps/web/package.json +++ b/apps/web/package.json @@ -15,6 +15,7 @@ "dependencies": { "@betterdb/shared": "workspace:*", "@radix-ui/react-tabs": "^1.1.13", + "@tanstack/react-query": "^5.95.2", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "d3": "^7.9.0", @@ -33,6 +34,8 @@ "@react-grab/mcp": "^0.1.28", "@rollup/plugin-commonjs": "^29.0.2", "@tailwindcss/vite": "^4.2.2", + "@testing-library/jest-dom": "^6.9.1", + "@testing-library/react": "^16.3.2", "@types/d3": "^7.4.3", "@types/node": "^22.19.15", "@types/react": "^19.2.14", @@ -42,11 +45,12 @@ "eslint-plugin-react-hooks": "^7.0.1", "eslint-plugin-react-refresh": "^0.5.2", "globals": "^17.4.0", + "happy-dom": "^20.8.9", "react-grab": "^0.1.28", "tailwindcss": "^4.2.2", "typescript": "^5.9.3", "typescript-eslint": "^8.57.2", - "vitest": "^4.1.1", - "vite": "^8.0.2" + "vite": "^8.0.2", + "vitest": "^4.1.1" } } diff --git a/apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx b/apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx index f1ddef76..d0425e8e 100644 --- a/apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx +++ b/apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx @@ -3,10 +3,12 @@ import { formatOps } from './utils.ts'; import { ThroughputForecast } from '@betterdb/shared'; export const InsufficientData = ({ forecast }: { forecast: ThroughputForecast }) => ( - -

{forecast.insufficientDataMessage}

+ +

{forecast.insufficientDataMessage}

{forecast.currentOpsPerSec > 0 && ( -

{formatOps(forecast.currentOpsPerSec)} ops/sec

+

+ {formatOps(forecast.currentOpsPerSec)} ops/sec +

)}
); diff --git a/apps/web/src/hooks/useBaselineHotKeys.test.ts b/apps/web/src/hooks/useBaselineHotKeys.test.ts new file mode 100644 index 00000000..4a018b8b --- /dev/null +++ b/apps/web/src/hooks/useBaselineHotKeys.test.ts @@ -0,0 +1,77 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { renderHookWithQuery, waitFor } from '../test/test-utils'; +import type { HotKeyEntry } from '@betterdb/shared'; + +vi.mock('../api/keyAnalytics', () => ({ + keyAnalyticsApi: { + getHotKeys: vi.fn(), + }, +})); + +import { keyAnalyticsApi } from '../api/keyAnalytics'; +import { useBaselineHotKeys } from './useBaselineHotKeys'; + +const mockGetHotKeys = vi.mocked(keyAnalyticsApi.getHotKeys); + +beforeEach(() => { + vi.clearAllMocks(); +}); + +describe('useBaselineHotKeys', () => { + it('does not fetch when time range is not set', () => { + renderHookWithQuery(() => + useBaselineHotKeys({ connectionId: 'test' }), + ); + expect(mockGetHotKeys).not.toHaveBeenCalled(); + }); + + it('does not fetch when disabled', () => { + renderHookWithQuery(() => + useBaselineHotKeys({ connectionId: 'test', startTime: 1000, endTime: 2000, enabled: false }), + ); + expect(mockGetHotKeys).not.toHaveBeenCalled(); + }); + + it('fetches baseline hot keys with oldest flag', async () => { + const mockEntries: HotKeyEntry[] = [ + { + id: 'hk-1', + keyName: 'foo', + connectionId: 'test', + capturedAt: 1000, + signalType: 'lfu', + rank: 1, + }, + ]; + mockGetHotKeys.mockResolvedValue(mockEntries); + + const { result } = renderHookWithQuery(() => + useBaselineHotKeys({ connectionId: 'test', startTime: 1000, endTime: 2000 }), + ); + + await waitFor(() => { + expect(result.current.data).toEqual(mockEntries); + }); + + expect(mockGetHotKeys).toHaveBeenCalledWith({ + limit: 50, + startTime: 1000, + endTime: 2000, + oldest: true, + }); + }); + + it('returns null when no entries found', async () => { + mockGetHotKeys.mockResolvedValue([]); + + const { result } = renderHookWithQuery(() => + 
useBaselineHotKeys({ connectionId: 'test', startTime: 1000, endTime: 2000 }), + ); + + await waitFor(() => { + expect(result.current.isFetched).toBe(true); + }); + + expect(result.current.data).toBeNull(); + }); +}); diff --git a/apps/web/src/hooks/useBaselineHotKeys.ts b/apps/web/src/hooks/useBaselineHotKeys.ts new file mode 100644 index 00000000..45309a6c --- /dev/null +++ b/apps/web/src/hooks/useBaselineHotKeys.ts @@ -0,0 +1,30 @@ +import { useQuery } from '@tanstack/react-query'; +import { keyAnalyticsApi } from '../api/keyAnalytics'; + +interface UseBaselineHotKeysOptions { + connectionId?: string; + startTime?: number; + endTime?: number; + enabled?: boolean; +} + +export function useBaselineHotKeys({ + connectionId, + startTime, + endTime, + enabled = true, +}: UseBaselineHotKeysOptions) { + return useQuery({ + queryKey: ['hot-keys-baseline', connectionId, startTime, endTime], + queryFn: async () => { + const entries = await keyAnalyticsApi.getHotKeys({ + limit: 50, + startTime, + endTime, + oldest: true, + }); + return entries.length > 0 ? 
entries : null; + }, + enabled: enabled && startTime !== undefined && endTime !== undefined, + }); +} diff --git a/apps/web/src/hooks/useLatencyDoctor.test.ts b/apps/web/src/hooks/useLatencyDoctor.test.ts new file mode 100644 index 00000000..7b0a5021 --- /dev/null +++ b/apps/web/src/hooks/useLatencyDoctor.test.ts @@ -0,0 +1,40 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { renderHookWithQuery, waitFor } from '../test/test-utils'; + +vi.mock('../api/metrics', () => ({ + metricsApi: { + getLatencyDoctor: vi.fn(), + }, +})); + +import { metricsApi } from '../api/metrics'; +import { useLatencyDoctor } from './useLatencyDoctor'; + +const mockGetDoctor = vi.mocked(metricsApi.getLatencyDoctor); + +beforeEach(() => { + vi.clearAllMocks(); +}); + +describe('useLatencyDoctor', () => { + it('fetches doctor report', async () => { + const report = { summary: 'All good', issues: [] }; + mockGetDoctor.mockResolvedValue({ report }); + + const { result } = renderHookWithQuery(() => useLatencyDoctor('test-conn')); + + await waitFor(() => { + expect(result.current.data).toEqual(report); + }); + }); + + it('returns error on failure', async () => { + mockGetDoctor.mockRejectedValue(new Error('Server error')); + + const { result } = renderHookWithQuery(() => useLatencyDoctor('test-conn')); + + await waitFor(() => { + expect(result.current.error).toBeInstanceOf(Error); + }); + }); +}); diff --git a/apps/web/src/hooks/useLatencyDoctor.ts b/apps/web/src/hooks/useLatencyDoctor.ts new file mode 100644 index 00000000..44de9f2b --- /dev/null +++ b/apps/web/src/hooks/useLatencyDoctor.ts @@ -0,0 +1,12 @@ +import { useQuery } from '@tanstack/react-query'; +import { metricsApi } from '../api/metrics'; + +export function useLatencyDoctor(connectionId?: string) { + return useQuery({ + queryKey: ['latency-doctor', connectionId], + queryFn: async () => { + const data = await metricsApi.getLatencyDoctor(); + return data.report; + }, + }); +} diff --git 
a/apps/web/src/hooks/useLatencyHistory.test.ts b/apps/web/src/hooks/useLatencyHistory.test.ts new file mode 100644 index 00000000..f005d9c4 --- /dev/null +++ b/apps/web/src/hooks/useLatencyHistory.test.ts @@ -0,0 +1,37 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { renderHookWithQuery, waitFor } from '../test/test-utils'; + +vi.mock('../api/metrics', () => ({ + metricsApi: { + getLatencyHistory: vi.fn(), + }, +})); + +import { metricsApi } from '../api/metrics'; +import { useLatencyHistory } from './useLatencyHistory'; + +const mockGetHistory = vi.mocked(metricsApi.getLatencyHistory); + +beforeEach(() => { + vi.clearAllMocks(); +}); + +describe('useLatencyHistory', () => { + it('does not fetch when no event selected', () => { + renderHookWithQuery(() => useLatencyHistory(null, 'test-conn')); + expect(mockGetHistory).not.toHaveBeenCalled(); + }); + + it('fetches history for selected event', async () => { + const mockData = [{ timestamp: 1000, latency: 5 }]; + mockGetHistory.mockResolvedValue(mockData); + + const { result } = renderHookWithQuery(() => useLatencyHistory('command.get', 'test-conn')); + + await waitFor(() => { + expect(result.current.data).toEqual(mockData); + }); + + expect(mockGetHistory).toHaveBeenCalledWith('command.get'); + }); +}); diff --git a/apps/web/src/hooks/useLatencyHistory.ts b/apps/web/src/hooks/useLatencyHistory.ts new file mode 100644 index 00000000..ab8efb52 --- /dev/null +++ b/apps/web/src/hooks/useLatencyHistory.ts @@ -0,0 +1,10 @@ +import { useQuery } from '@tanstack/react-query'; +import { metricsApi } from '../api/metrics'; + +export function useLatencyHistory(selectedEvent: string | null, connectionId?: string) { + return useQuery({ + queryKey: ['latency-history', connectionId, selectedEvent], + queryFn: () => metricsApi.getLatencyHistory(selectedEvent!), + enabled: !!selectedEvent, + }); +} diff --git a/apps/web/src/hooks/useLicense.test.ts b/apps/web/src/hooks/useLicense.test.ts new file mode 
100644 index 00000000..72bf22d2 --- /dev/null +++ b/apps/web/src/hooks/useLicense.test.ts @@ -0,0 +1,53 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { renderHookWithQuery, waitFor } from '../test/test-utils'; + +vi.mock('../api/license', () => ({ + licenseApi: { + getStatus: vi.fn(), + }, +})); + +import { licenseApi } from '../api/license'; +import { useLicenseStatus } from './useLicense'; + +const mockGetStatus = vi.mocked(licenseApi.getStatus); + +beforeEach(() => { + vi.clearAllMocks(); +}); + +describe('useLicenseStatus', () => { + it('starts in loading state', () => { + mockGetStatus.mockReturnValue(new Promise(() => {})); // never resolves + const { result } = renderHookWithQuery(() => useLicenseStatus()); + expect(result.current.loading).toBe(true); + expect(result.current.license).toBeNull(); + }); + + it('returns license data on success', async () => { + const mockLicense = { tier: 'pro', features: ['anomaly-detection'], valid: true }; + mockGetStatus.mockResolvedValue(mockLicense); + + const { result } = renderHookWithQuery(() => useLicenseStatus()); + + await waitFor(() => { + expect(result.current.loading).toBe(false); + }); + + expect(result.current.license).toEqual(mockLicense); + expect(result.current.error).toBeNull(); + }); + + it('returns error on failure', async () => { + mockGetStatus.mockRejectedValue(new Error('Network error')); + + const { result } = renderHookWithQuery(() => useLicenseStatus()); + + await waitFor(() => { + expect(result.current.loading).toBe(false); + }); + + expect(result.current.license).toBeNull(); + expect(result.current.error).toBeInstanceOf(Error); + }); +}); diff --git a/apps/web/src/hooks/useLicense.ts b/apps/web/src/hooks/useLicense.ts index 791705a4..f9301bbf 100644 --- a/apps/web/src/hooks/useLicense.ts +++ b/apps/web/src/hooks/useLicense.ts @@ -1,4 +1,5 @@ -import { createContext, useContext, useState, useEffect } from 'react'; +import { createContext, useContext } from 'react'; 
+import { useQuery } from '@tanstack/react-query'; import { licenseApi, type LicenseStatus } from '../api/license'; import { Feature } from '@betterdb/shared'; @@ -15,17 +16,14 @@ export function useLicense() { } export function useLicenseStatus() { - const [license, setLicense] = useState(null); - const [loading, setLoading] = useState(true); - const [error, setError] = useState(null); + const { data, isLoading, error } = useQuery({ + queryKey: ['license-status'], + queryFn: () => licenseApi.getStatus(), + }); - useEffect(() => { - licenseApi - .getStatus() - .then(setLicense) - .catch(setError) - .finally(() => setLoading(false)); - }, []); - - return { license, loading, error }; + return { + license: data ?? null, + loading: isLoading, + error: error ?? null, + }; } diff --git a/apps/web/src/hooks/useMcpTokens.test.ts b/apps/web/src/hooks/useMcpTokens.test.ts new file mode 100644 index 00000000..77ada036 --- /dev/null +++ b/apps/web/src/hooks/useMcpTokens.test.ts @@ -0,0 +1,50 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { renderHookWithQuery, waitFor } from '../test/test-utils'; + +vi.mock('../api/agent-tokens', () => ({ + agentTokensApi: { + list: vi.fn(), + }, +})); + +import { agentTokensApi } from '../api/agent-tokens'; +import { useMcpTokens } from './useMcpTokens'; + +const mockList = vi.mocked(agentTokensApi.list); + +beforeEach(() => { + vi.clearAllMocks(); +}); + +describe('useMcpTokens', () => { + it('does not fetch when disabled', () => { + const { result } = renderHookWithQuery(() => useMcpTokens(false)); + expect(mockList).not.toHaveBeenCalled(); + expect(result.current.tokens).toEqual([]); + }); + + it('fetches tokens when enabled', async () => { + const mockTokens = [ + { id: '1', name: 'test-token', type: 'mcp', createdAt: '2024-01-01', lastUsed: null }, + ]; + mockList.mockResolvedValue(mockTokens); + + const { result } = renderHookWithQuery(() => useMcpTokens(true)); + + await waitFor(() => { + 
expect(result.current.tokens).toEqual(mockTokens); + }); + + expect(mockList).toHaveBeenCalledWith('mcp'); + }); + + it('returns error on failure', async () => { + mockList.mockRejectedValue(new Error('Unauthorized')); + + const { result } = renderHookWithQuery(() => useMcpTokens(true)); + + await waitFor(() => { + expect(result.current.error).toBeInstanceOf(Error); + }); + }); +}); diff --git a/apps/web/src/hooks/useMcpTokens.ts b/apps/web/src/hooks/useMcpTokens.ts new file mode 100644 index 00000000..549a61bb --- /dev/null +++ b/apps/web/src/hooks/useMcpTokens.ts @@ -0,0 +1,26 @@ +import { useCallback } from 'react'; +import { useQuery, useQueryClient } from '@tanstack/react-query'; +import { agentTokensApi, type TokenListItem } from '../api/agent-tokens'; + +const QUERY_KEY = ['mcp-tokens'] as const; + +export function useMcpTokens(enabled: boolean) { + const queryClient = useQueryClient(); + + const { data, isLoading, error } = useQuery({ + queryKey: QUERY_KEY, + queryFn: () => agentTokensApi.list('mcp'), + enabled, + }); + + const invalidate = useCallback(async () => { + await queryClient.invalidateQueries({ queryKey: QUERY_KEY }); + }, [queryClient]); + + return { + tokens: data ?? [], + loading: isLoading, + error: error ?? 
null, + invalidate, + }; +} diff --git a/apps/web/src/hooks/usePolling.ts b/apps/web/src/hooks/usePolling.ts index c58abeb4..593ef199 100644 --- a/apps/web/src/hooks/usePolling.ts +++ b/apps/web/src/hooks/usePolling.ts @@ -1,101 +1,62 @@ -import { useState, useEffect, useRef } from 'react'; +import { useQuery, useQueryClient } from '@tanstack/react-query'; +import { useCallback, useRef } from 'react'; import { PaymentRequiredError } from '../api/client'; import { useUpgradePrompt } from './useUpgradePrompt'; interface UsePollingOptions { - fetcher: ((signal?: AbortSignal) => Promise) | ((...args: any[]) => Promise); + fetcher: (signal: AbortSignal) => Promise; interval?: number; enabled?: boolean; /** Optional key that triggers a refetch when changed (e.g., filter parameters) */ refetchKey?: string | number; + /** Query key — provide explicitly to share cache across components. */ + queryKey?: readonly unknown[]; } +let keyCounter = 0; + export function usePolling({ fetcher, interval = 5000, enabled = true, refetchKey, + queryKey, }: UsePollingOptions) { - const [data, setData] = useState(null); - const [error, setError] = useState(null); - const [loading, setLoading] = useState(true); - const [lastUpdated, setLastUpdated] = useState(null); - const fetcherRef = useRef(fetcher); - const abortControllerRef = useRef(null); const { showUpgradePrompt } = useUpgradePrompt(); + const queryClient = useQueryClient(); + const fetcherRef = useRef(fetcher); + fetcherRef.current = fetcher; - useEffect(() => { - fetcherRef.current = fetcher; - }, [fetcher]); - - useEffect(() => { - if (!enabled) { - return; - } - - const refresh = async () => { - if (abortControllerRef.current) { - abortControllerRef.current.abort(); - } + // Stable key per hook instance, won't change across renders + const stableKeyRef = useRef(null); + if (stableKeyRef.current === null) { + stableKeyRef.current = ++keyCounter; + } - const abortController = new AbortController(); - abortControllerRef.current = 
abortController; + const resolvedKey = queryKey ?? ['polling', stableKeyRef.current, refetchKey]; + const { data, error, isLoading, dataUpdatedAt } = useQuery({ + queryKey: resolvedKey, + queryFn: async ({ signal }) => { try { - setError(null); - const result = await (fetcherRef.current as (signal?: AbortSignal) => Promise)( - abortController.signal, - ); - - if (!abortController.signal.aborted) { - setData(result); - setLastUpdated(new Date()); - } + return await fetcherRef.current(signal); } catch (e) { - if (e instanceof Error && e.name === 'AbortError') { - return; - } - if (e instanceof PaymentRequiredError) { showUpgradePrompt(e); - setError(e); - return; - } - setError(e instanceof Error ? e : new Error('Unknown error')); - } finally { - if (!abortController.signal.aborted) { - setLoading(false); } + throw e; } - }; + }, + enabled, + refetchInterval: interval, + refetchIntervalInBackground: false, + }); - void refresh(); - const timer = setInterval(refresh, interval); + const refresh = useCallback(async () => { + await queryClient.invalidateQueries({ queryKey: resolvedKey }); + }, [queryClient, resolvedKey]); - return () => { - clearInterval(timer); - if (abortControllerRef.current) { - abortControllerRef.current.abort(); - abortControllerRef.current = null; - } - }; - }, [interval, enabled, showUpgradePrompt, refetchKey]); - - const manualRefresh = async () => { - try { - setError(null); - const result = await fetcherRef.current(); - setData(result); - setLastUpdated(new Date()); - } catch (e) { - if (e instanceof PaymentRequiredError) { - showUpgradePrompt(e); - setError(e); - return; - } - setError(e instanceof Error ? e : new Error('Unknown error')); - } - }; + const lastUpdated = dataUpdatedAt ? new Date(dataUpdatedAt) : null; - return { data, error, loading, lastUpdated, refresh: manualRefresh }; + return { data: data ?? 
null, error, loading: isLoading, lastUpdated, refresh }; } diff --git a/apps/web/src/hooks/useStoredCommandLog.test.ts b/apps/web/src/hooks/useStoredCommandLog.test.ts new file mode 100644 index 00000000..6f4ac9b1 --- /dev/null +++ b/apps/web/src/hooks/useStoredCommandLog.test.ts @@ -0,0 +1,62 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { renderHookWithQuery, waitFor } from '../test/test-utils'; + +vi.mock('../api/metrics', () => ({ + metricsApi: { getStoredCommandLog: vi.fn() }, +})); + +import { metricsApi } from '../api/metrics'; +import { useStoredCommandLog } from './useStoredCommandLog'; + +const mockGet = vi.mocked(metricsApi.getStoredCommandLog); + +beforeEach(() => vi.clearAllMocks()); + +describe('useStoredCommandLog', () => { + it('does not fetch when disabled', () => { + renderHookWithQuery(() => + useStoredCommandLog({ connectionId: 'test', activeTab: 'slow', page: 0, enabled: false }), + ); + expect(mockGet).not.toHaveBeenCalled(); + }); + + it('fetches entries with pagination', async () => { + const entries = Array.from({ length: 101 }, (_, i) => ({ id: i })); + mockGet.mockResolvedValue(entries); + + const { result } = renderHookWithQuery(() => + useStoredCommandLog({ + connectionId: 'test', + startTime: 1000, + endTime: 2000, + activeTab: 'slow', + page: 0, + }), + ); + + await waitFor(() => expect(result.current.data).toBeDefined()); + + expect(result.current.data!.entries).toHaveLength(100); + expect(result.current.data!.hasMore).toBe(true); + }); + + it('sets hasMore to false when fewer entries returned', async () => { + const entries = Array.from({ length: 50 }, (_, i) => ({ id: i })); + mockGet.mockResolvedValue(entries); + + const { result } = renderHookWithQuery(() => + useStoredCommandLog({ + connectionId: 'test', + startTime: 1000, + endTime: 2000, + activeTab: 'slow', + page: 0, + }), + ); + + await waitFor(() => expect(result.current.data).toBeDefined()); + + 
expect(result.current.data!.entries).toHaveLength(50); + expect(result.current.data!.hasMore).toBe(false); + }); +}); diff --git a/apps/web/src/hooks/useStoredCommandLog.ts b/apps/web/src/hooks/useStoredCommandLog.ts new file mode 100644 index 00000000..a446c559 --- /dev/null +++ b/apps/web/src/hooks/useStoredCommandLog.ts @@ -0,0 +1,44 @@ +import { useQuery, keepPreviousData } from '@tanstack/react-query'; +import { metricsApi } from '../api/metrics'; +import type { CommandLogType } from '../types/metrics'; + +export const COMMAND_LOG_PAGE_SIZE = 100; + +interface UseStoredCommandLogOptions { + connectionId?: string; + startTime?: number; + endTime?: number; + activeTab: CommandLogType; + page: number; + enabled?: boolean; +} + +export function useStoredCommandLog({ + connectionId, + startTime, + endTime, + activeTab, + page, + enabled = true, +}: UseStoredCommandLogOptions) { + return useQuery({ + queryKey: ['stored-commandlog', connectionId, activeTab, startTime, endTime, page], + queryFn: async () => { + const offset = page * COMMAND_LOG_PAGE_SIZE; + const entries = await metricsApi.getStoredCommandLog({ + startTime, + endTime, + type: activeTab, + limit: COMMAND_LOG_PAGE_SIZE + 1, + offset, + }); + const hasMore = entries.length > COMMAND_LOG_PAGE_SIZE; + return { + entries: hasMore ? 
entries.slice(0, COMMAND_LOG_PAGE_SIZE) : entries, + hasMore, + }; + }, + enabled: enabled && startTime !== undefined && endTime !== undefined, + placeholderData: keepPreviousData, + }); +} diff --git a/apps/web/src/hooks/useStoredCommandLogPatterns.test.ts b/apps/web/src/hooks/useStoredCommandLogPatterns.test.ts new file mode 100644 index 00000000..a7af302d --- /dev/null +++ b/apps/web/src/hooks/useStoredCommandLogPatterns.test.ts @@ -0,0 +1,39 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { renderHookWithQuery, waitFor } from '../test/test-utils'; + +vi.mock('../api/metrics', () => ({ + metricsApi: { getStoredCommandLogPatternAnalysis: vi.fn() }, +})); + +import { metricsApi } from '../api/metrics'; +import { useStoredCommandLogPatterns } from './useStoredCommandLogPatterns'; + +const mockGet = vi.mocked(metricsApi.getStoredCommandLogPatternAnalysis); + +beforeEach(() => vi.clearAllMocks()); + +describe('useStoredCommandLogPatterns', () => { + it('does not fetch when disabled', () => { + renderHookWithQuery(() => + useStoredCommandLogPatterns({ connectionId: 'test', activeTab: 'slow', enabled: false }), + ); + expect(mockGet).not.toHaveBeenCalled(); + }); + + it('fetches patterns when enabled', async () => { + const mockData = { patterns: [{ pattern: 'GET *', count: 10 }] }; + mockGet.mockResolvedValue(mockData); + + const { result } = renderHookWithQuery(() => + useStoredCommandLogPatterns({ + connectionId: 'test', + startTime: 1000, + endTime: 2000, + activeTab: 'slow', + }), + ); + + await waitFor(() => expect(result.current.data).toEqual(mockData)); + expect(mockGet).toHaveBeenCalledWith({ startTime: 1000, endTime: 2000, type: 'slow', limit: 500 }); + }); +}); diff --git a/apps/web/src/hooks/useStoredCommandLogPatterns.ts b/apps/web/src/hooks/useStoredCommandLogPatterns.ts new file mode 100644 index 00000000..8e3bbe1c --- /dev/null +++ b/apps/web/src/hooks/useStoredCommandLogPatterns.ts @@ -0,0 +1,26 @@ +import { useQuery } from 
'@tanstack/react-query'; +import { metricsApi } from '../api/metrics'; +import type { CommandLogType, SlowLogPatternAnalysis } from '../types/metrics'; + +interface UseStoredCommandLogPatternsOptions { + connectionId?: string; + startTime?: number; + endTime?: number; + activeTab: CommandLogType; + enabled?: boolean; +} + +export function useStoredCommandLogPatterns({ + connectionId, + startTime, + endTime, + activeTab, + enabled = true, +}: UseStoredCommandLogPatternsOptions) { + return useQuery({ + queryKey: ['stored-commandlog-patterns', connectionId, activeTab, startTime, endTime], + queryFn: () => + metricsApi.getStoredCommandLogPatternAnalysis({ startTime, endTime, type: activeTab, limit: 500 }), + enabled: enabled && startTime !== undefined && endTime !== undefined, + }); +} diff --git a/apps/web/src/hooks/useStoredLatency.test.ts b/apps/web/src/hooks/useStoredLatency.test.ts new file mode 100644 index 00000000..8b18f239 --- /dev/null +++ b/apps/web/src/hooks/useStoredLatency.test.ts @@ -0,0 +1,75 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { renderHookWithQuery, waitFor } from '../test/test-utils'; + +vi.mock('../api/metrics', () => ({ + metricsApi: { + getStoredLatencySnapshots: vi.fn(), + getStoredLatencyHistograms: vi.fn(), + }, +})); + +import { metricsApi } from '../api/metrics'; +import { useStoredLatencySnapshots, useStoredLatencyHistograms } from './useStoredLatency'; + +const mockGetSnapshots = vi.mocked(metricsApi.getStoredLatencySnapshots); +const mockGetHistograms = vi.mocked(metricsApi.getStoredLatencyHistograms); + +beforeEach(() => { + vi.clearAllMocks(); +}); + +describe('useStoredLatencySnapshots', () => { + it('does not fetch when time range is not set', () => { + renderHookWithQuery(() => useStoredLatencySnapshots({ connectionId: 'test' })); + expect(mockGetSnapshots).not.toHaveBeenCalled(); + }); + + it('fetches snapshots when time range is set', async () => { + const mockData = [{ id: '1', eventName: 
'get', maxLatency: 5 }]; + mockGetSnapshots.mockResolvedValue(mockData); + + const { result } = renderHookWithQuery(() => + useStoredLatencySnapshots({ connectionId: 'test', startTime: 1000, endTime: 2000 }), + ); + + await waitFor(() => { + expect(result.current.data).toEqual(mockData); + }); + }); +}); + +describe('useStoredLatencyHistograms', () => { + it('does not fetch when disabled', () => { + renderHookWithQuery(() => + useStoredLatencyHistograms({ connectionId: 'test', startTime: 1000, endTime: 2000, enabled: false }), + ); + expect(mockGetHistograms).not.toHaveBeenCalled(); + }); + + it('returns histogram data from first result', async () => { + const histData = { get: { avg: 1, min: 0, max: 5, p50: 1, p99: 4 } }; + mockGetHistograms.mockResolvedValue([{ data: histData }]); + + const { result } = renderHookWithQuery(() => + useStoredLatencyHistograms({ connectionId: 'test', startTime: 1000, endTime: 2000 }), + ); + + await waitFor(() => { + expect(result.current.data).toEqual(histData); + }); + }); + + it('returns null when no histograms found', async () => { + mockGetHistograms.mockResolvedValue([]); + + const { result } = renderHookWithQuery(() => + useStoredLatencyHistograms({ connectionId: 'test', startTime: 1000, endTime: 2000 }), + ); + + await waitFor(() => { + expect(result.current.isFetched).toBe(true); + }); + + expect(result.current.data).toBeNull(); + }); +}); diff --git a/apps/web/src/hooks/useStoredLatency.ts b/apps/web/src/hooks/useStoredLatency.ts new file mode 100644 index 00000000..1e34c98f --- /dev/null +++ b/apps/web/src/hooks/useStoredLatency.ts @@ -0,0 +1,38 @@ +import { useQuery } from '@tanstack/react-query'; +import { metricsApi } from '../api/metrics'; + +interface UseStoredLatencyOptions { + connectionId?: string; + startTime?: number; + endTime?: number; + enabled?: boolean; +} + +export function useStoredLatencySnapshots({ + connectionId, + startTime, + endTime, + enabled = true, +}: UseStoredLatencyOptions) { + return 
useQuery({ + queryKey: ['stored-latency-snapshots', connectionId, startTime, endTime], + queryFn: () => metricsApi.getStoredLatencySnapshots({ startTime, endTime, limit: 500 }), + enabled: enabled && startTime !== undefined && endTime !== undefined, + }); +} + +export function useStoredLatencyHistograms({ + connectionId, + startTime, + endTime, + enabled = true, +}: UseStoredLatencyOptions) { + return useQuery({ + queryKey: ['stored-latency-histograms', connectionId, startTime, endTime], + queryFn: async () => { + const histograms = await metricsApi.getStoredLatencyHistograms({ startTime, endTime, limit: 1 }); + return histograms.length > 0 ? histograms[0].data : null; + }, + enabled: enabled && startTime !== undefined && endTime !== undefined, + }); +} diff --git a/apps/web/src/hooks/useStoredMemorySnapshots.test.ts b/apps/web/src/hooks/useStoredMemorySnapshots.test.ts new file mode 100644 index 00000000..a272a2b2 --- /dev/null +++ b/apps/web/src/hooks/useStoredMemorySnapshots.test.ts @@ -0,0 +1,66 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { renderHookWithQuery, waitFor } from '../test/test-utils'; + +vi.mock('../api/metrics', () => ({ + metricsApi: { + getStoredMemorySnapshots: vi.fn(), + }, +})); + +import { metricsApi } from '../api/metrics'; +import { useStoredMemorySnapshots } from './useStoredMemorySnapshots'; + +const mockGetSnapshots = vi.mocked(metricsApi.getStoredMemorySnapshots); + +beforeEach(() => { + vi.clearAllMocks(); +}); + +describe('useStoredMemorySnapshots', () => { + it('does not fetch when time filter is not set', () => { + const { result } = renderHookWithQuery(() => + useStoredMemorySnapshots({ connectionId: 'test' }), + ); + expect(mockGetSnapshots).not.toHaveBeenCalled(); + expect(result.current.data).toBeUndefined(); + }); + + it('does not fetch when enabled is false', () => { + const { result } = renderHookWithQuery(() => + useStoredMemorySnapshots({ + connectionId: 'test', + startTime: 1000, + endTime: 
2000, + enabled: false, + }), + ); + expect(mockGetSnapshots).not.toHaveBeenCalled(); + expect(result.current.data).toBeUndefined(); + }); + + it('fetches snapshots when time filter is set', async () => { + const mockData = [ + { id: '1', timestamp: 1000, opsPerSec: 100, usedMemory: 500 }, + { id: '2', timestamp: 2000, opsPerSec: 200, usedMemory: 600 }, + ]; + mockGetSnapshots.mockResolvedValue(mockData); + + const { result } = renderHookWithQuery(() => + useStoredMemorySnapshots({ + connectionId: 'test', + startTime: 1000, + endTime: 2000, + }), + ); + + await waitFor(() => { + expect(result.current.data).toEqual(mockData); + }); + + expect(mockGetSnapshots).toHaveBeenCalledWith({ + startTime: 1000, + endTime: 2000, + limit: 500, + }); + }); +}); diff --git a/apps/web/src/hooks/useStoredMemorySnapshots.ts b/apps/web/src/hooks/useStoredMemorySnapshots.ts new file mode 100644 index 00000000..94421636 --- /dev/null +++ b/apps/web/src/hooks/useStoredMemorySnapshots.ts @@ -0,0 +1,24 @@ +import { useQuery } from '@tanstack/react-query'; +import { metricsApi } from '../api/metrics'; + +interface UseStoredMemorySnapshotsOptions { + connectionId?: string; + startTime?: number; + endTime?: number; + enabled?: boolean; + limit?: number; +} + +export function useStoredMemorySnapshots({ + connectionId, + startTime, + endTime, + enabled = true, + limit = 500, +}: UseStoredMemorySnapshotsOptions) { + return useQuery({ + queryKey: ['stored-memory-snapshots', connectionId, startTime, endTime, limit], + queryFn: () => metricsApi.getStoredMemorySnapshots({ startTime, endTime, limit }), + enabled: enabled && startTime !== undefined && endTime !== undefined, + }); +} diff --git a/apps/web/src/hooks/useStoredSlowLog.test.ts b/apps/web/src/hooks/useStoredSlowLog.test.ts new file mode 100644 index 00000000..dd725ab3 --- /dev/null +++ b/apps/web/src/hooks/useStoredSlowLog.test.ts @@ -0,0 +1,32 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { 
renderHookWithQuery, waitFor } from '../test/test-utils'; + +vi.mock('../api/metrics', () => ({ + metricsApi: { getStoredSlowLog: vi.fn() }, +})); + +import { metricsApi } from '../api/metrics'; +import { useStoredSlowLog } from './useStoredSlowLog'; + +const mockGet = vi.mocked(metricsApi.getStoredSlowLog); + +beforeEach(() => vi.clearAllMocks()); + +describe('useStoredSlowLog', () => { + it('does not fetch when time range is not set', () => { + renderHookWithQuery(() => useStoredSlowLog({ connectionId: 'test' })); + expect(mockGet).not.toHaveBeenCalled(); + }); + + it('fetches slow log when time range is set', async () => { + const mockData = [{ id: 1, command: 'GET foo', duration: 500 }]; + mockGet.mockResolvedValue(mockData); + + const { result } = renderHookWithQuery(() => + useStoredSlowLog({ connectionId: 'test', startTime: 1000, endTime: 2000 }), + ); + + await waitFor(() => expect(result.current.data).toEqual(mockData)); + expect(mockGet).toHaveBeenCalledWith({ startTime: 1000, endTime: 2000, limit: 100 }); + }); +}); diff --git a/apps/web/src/hooks/useStoredSlowLog.ts b/apps/web/src/hooks/useStoredSlowLog.ts new file mode 100644 index 00000000..860ebe35 --- /dev/null +++ b/apps/web/src/hooks/useStoredSlowLog.ts @@ -0,0 +1,22 @@ +import { useQuery } from '@tanstack/react-query'; +import { metricsApi } from '../api/metrics'; + +interface UseStoredSlowLogOptions { + connectionId?: string; + startTime?: number; + endTime?: number; + enabled?: boolean; +} + +export function useStoredSlowLog({ + connectionId, + startTime, + endTime, + enabled = true, +}: UseStoredSlowLogOptions) { + return useQuery({ + queryKey: ['stored-slowlog', connectionId, startTime, endTime], + queryFn: () => metricsApi.getStoredSlowLog({ startTime, endTime, limit: 100 }), + enabled: enabled && startTime !== undefined && endTime !== undefined, + }); +} diff --git a/apps/web/src/hooks/useVersionCheck.test.ts b/apps/web/src/hooks/useVersionCheck.test.ts new file mode 100644 index 
00000000..dcfed816 --- /dev/null +++ b/apps/web/src/hooks/useVersionCheck.test.ts @@ -0,0 +1,112 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { renderHookWithQuery, waitFor, act } from '../test/test-utils'; + +vi.mock('../api/version', () => ({ + versionApi: { + getVersion: vi.fn(), + }, +})); + +import { versionApi } from '../api/version'; +import { useVersionCheckState } from './useVersionCheck'; + +const mockGetVersion = vi.mocked(versionApi.getVersion); + +const storage = new Map(); +const mockLocalStorage = { + getItem: vi.fn((key: string) => storage.get(key) ?? null), + setItem: vi.fn((key: string, value: string) => storage.set(key, value)), + removeItem: vi.fn((key: string) => storage.delete(key)), +}; + +beforeEach(() => { + vi.clearAllMocks(); + storage.clear(); + vi.stubGlobal('localStorage', mockLocalStorage); +}); + +describe('useVersionCheckState', () => { + it('starts in loading state', () => { + mockGetVersion.mockReturnValue(new Promise(() => {})); + const { result } = renderHookWithQuery(() => useVersionCheckState()); + expect(result.current.loading).toBe(true); + }); + + it('returns version info on success', async () => { + mockGetVersion.mockResolvedValue({ + current: '1.0.0', + latest: '1.1.0', + updateAvailable: true, + releaseUrl: 'https://example.com/release', + checkedAt: Date.now(), + }); + + const { result } = renderHookWithQuery(() => useVersionCheckState()); + + await waitFor(() => { + expect(result.current.loading).toBe(false); + }); + + expect(result.current.current).toBe('1.0.0'); + expect(result.current.latest).toBe('1.1.0'); + expect(result.current.updateAvailable).toBe(true); + expect(result.current.error).toBeNull(); + }); + + it('handles dismiss and persists to localStorage', async () => { + mockGetVersion.mockResolvedValue({ + current: '1.0.0', + latest: '1.1.0', + updateAvailable: true, + releaseUrl: null, + checkedAt: Date.now(), + }); + + const { result } = renderHookWithQuery(() => 
useVersionCheckState()); + + await waitFor(() => { + expect(result.current.loading).toBe(false); + }); + + expect(result.current.dismissed).toBe(false); + + act(() => { + result.current.dismiss(); + }); + + expect(result.current.dismissed).toBe(true); + expect(mockLocalStorage.setItem).toHaveBeenCalledWith('betterdb_update_dismissed_version', '1.1.0'); + }); + + it('is not dismissed when a newer version arrives', async () => { + storage.set('betterdb_update_dismissed_version', '1.0.0'); + + mockGetVersion.mockResolvedValue({ + current: '1.0.0', + latest: '1.1.0', + updateAvailable: true, + releaseUrl: null, + checkedAt: Date.now(), + }); + + const { result } = renderHookWithQuery(() => useVersionCheckState()); + + await waitFor(() => { + expect(result.current.loading).toBe(false); + }); + + expect(result.current.dismissed).toBe(false); + }); + + it('returns error on failure', async () => { + mockGetVersion.mockRejectedValue(new Error('Fetch failed')); + + const { result } = renderHookWithQuery(() => useVersionCheckState()); + + await waitFor(() => { + expect(result.current.loading).toBe(false); + }); + + expect(result.current.error).toBeInstanceOf(Error); + }); +}); diff --git a/apps/web/src/hooks/useVersionCheck.ts b/apps/web/src/hooks/useVersionCheck.ts index 6a2c84d7..eda053d9 100644 --- a/apps/web/src/hooks/useVersionCheck.ts +++ b/apps/web/src/hooks/useVersionCheck.ts @@ -1,19 +1,17 @@ -import { useState, useEffect, useCallback, useRef, createContext, useContext } from 'react'; +import { useState, useCallback, createContext, useContext } from 'react'; +import { useQuery, useQueryClient } from '@tanstack/react-query'; import { versionApi } from '../api/version'; import type { VersionInfo } from '@betterdb/shared'; -interface VersionCheckState extends VersionInfo { +interface VersionCheckContextValue extends VersionInfo { loading: boolean; error: Error | null; dismissed: boolean; -} - -interface VersionCheckContextValue extends VersionCheckState { dismiss: () => 
void; refresh: () => Promise; } -const DEFAULT_STATE: VersionCheckState = { +const DEFAULT_STATE: VersionCheckContextValue = { current: 'unknown', latest: null, updateAvailable: false, @@ -22,81 +20,50 @@ const DEFAULT_STATE: VersionCheckState = { loading: true, error: null, dismissed: false, -}; - -export const VersionCheckContext = createContext({ - ...DEFAULT_STATE, dismiss: () => {}, refresh: async () => {}, -}); +}; + +export const VersionCheckContext = createContext(DEFAULT_STATE); const DISMISS_KEY = 'betterdb_update_dismissed_version'; export function useVersionCheckState(): VersionCheckContextValue { - const [state, setState] = useState(() => { - // Check if user dismissed this version - const dismissedVersion = localStorage.getItem(DISMISS_KEY); - return { - ...DEFAULT_STATE, - dismissed: !!dismissedVersion, - }; + const queryClient = useQueryClient(); + const [dismissedVersion, setDismissedVersion] = useState( + () => localStorage.getItem(DISMISS_KEY), + ); + + const { data, isLoading, error } = useQuery({ + queryKey: ['version-check'], + queryFn: () => versionApi.getVersion(), + refetchInterval: (query) => { + const intervalMs = (query.state.data as VersionInfo & { versionCheckIntervalMs?: number }) + ?.versionCheckIntervalMs; + return intervalMs ?? 
3600000; + }, }); - const intervalRef = useRef | undefined>(undefined); - const intervalMsRef = useRef(3600000); - - const fetchVersion = useCallback(async () => { - setState((prev) => ({ ...prev, loading: true, error: null })); - try { - const info = await versionApi.getVersion(); - - if (info.versionCheckIntervalMs) { - intervalMsRef.current = info.versionCheckIntervalMs; - } - - // Check if this specific version was dismissed - const dismissedVersion = localStorage.getItem(DISMISS_KEY); - const isDismissed = dismissedVersion === info.latest; - - setState({ - ...info, - loading: false, - error: null, - dismissed: isDismissed, - }); - } catch (err) { - setState((prev) => ({ - ...prev, - loading: false, - error: err instanceof Error ? err : new Error('Failed to fetch version'), - })); - } - }, []); - const dismiss = useCallback(() => { - setState((prev) => { - if (prev.latest) { - localStorage.setItem(DISMISS_KEY, prev.latest); - } - return { ...prev, dismissed: true }; - }); - }, []); + if (data?.latest) { + localStorage.setItem(DISMISS_KEY, data.latest); + setDismissedVersion(data.latest); + } + }, [data?.latest]); const refresh = useCallback(async () => { - await fetchVersion(); - }, [fetchVersion]); - - useEffect(() => { - fetchVersion().then(() => { - intervalRef.current = setInterval(fetchVersion, intervalMsRef.current); - }); - return () => { - if (intervalRef.current) clearInterval(intervalRef.current); - }; - }, [fetchVersion]); + await queryClient.invalidateQueries({ queryKey: ['version-check'] }); + }, [queryClient]); return { - ...state, + current: data?.current ?? 'unknown', + latest: data?.latest ?? null, + updateAvailable: data?.updateAvailable ?? false, + releaseUrl: data?.releaseUrl ?? null, + checkedAt: data?.checkedAt ?? null, + loading: isLoading, + error: error ?? 
null, + dismissed: dismissedVersion !== null && dismissedVersion === data?.latest, dismiss, refresh, }; diff --git a/apps/web/src/main.tsx b/apps/web/src/main.tsx index 9aa52ffd..2e82165d 100644 --- a/apps/web/src/main.tsx +++ b/apps/web/src/main.tsx @@ -1,10 +1,23 @@ import React from 'react'; import ReactDOM from 'react-dom/client'; +import { QueryClient, QueryClientProvider } from '@tanstack/react-query'; import App from './App'; import './index.css'; +const queryClient = new QueryClient({ + defaultOptions: { + queries: { + staleTime: 5_000, + retry: 1, + refetchOnWindowFocus: true, + }, + }, +}); + ReactDOM.createRoot(document.getElementById('root')!).render( - + + + , ); diff --git a/apps/web/src/pages/Dashboard.tsx b/apps/web/src/pages/Dashboard.tsx index 8f0a9ff6..538411ac 100644 --- a/apps/web/src/pages/Dashboard.tsx +++ b/apps/web/src/pages/Dashboard.tsx @@ -2,6 +2,7 @@ import { useState, useEffect, useRef } from 'react'; import { metricsApi } from '../api/metrics'; import { usePolling } from '../hooks/usePolling'; import { useConnection } from '../hooks/useConnection'; +import { useStoredMemorySnapshots } from '../hooks/useStoredMemorySnapshots'; import { ConnectionCard } from '../components/dashboard/ConnectionCard'; import { OverviewCards } from '../components/dashboard/OverviewCards'; import { MemoryChart } from '../components/dashboard/MemoryChart'; @@ -12,7 +13,6 @@ import { deriveStoredIoDeltas } from '../components/dashboard/io-threads.utils'; import { EventTimeline } from '../components/dashboard/EventTimeline'; import { CapabilitiesBadges } from '../components/dashboard/CapabilitiesBadges'; import { DateRangePicker, DateRange } from '../components/ui/date-range-picker'; -import type { StoredMemorySnapshot } from '../types/metrics'; export function Dashboard() { const { currentConnection } = useConnection(); @@ -44,22 +44,12 @@ export function Dashboard() { const endTime = dateRange?.to ? 
dateRange.to.getTime() : undefined; const isTimeFiltered = startTime !== undefined && endTime !== undefined; - const [storedMemorySnapshots, setStoredMemorySnapshots] = useState(null); - - useEffect(() => { - if (!isTimeFiltered) { - setStoredMemorySnapshots(null); - return; - } - - setStoredMemorySnapshots(null); - let cancelled = false; - metricsApi.getStoredMemorySnapshots({ startTime, endTime, limit: 500 }) - .then(data => { if (!cancelled) setStoredMemorySnapshots(data); }) - .catch(err => { console.error('Failed to fetch stored memory snapshots:', err); }); - - return () => { cancelled = true; }; - }, [startTime, endTime, isTimeFiltered, currentConnection?.id]); + const { data: storedMemorySnapshots } = useStoredMemorySnapshots({ + connectionId: currentConnection?.id, + startTime, + endTime, + enabled: isTimeFiltered, + }); const sortedStoredSnapshots = storedMemorySnapshots ? [...storedMemorySnapshots].sort((a, b) => a.timestamp - b.timestamp) diff --git a/apps/web/src/pages/KeyAnalytics.tsx b/apps/web/src/pages/KeyAnalytics.tsx index a90d53d6..f7a8d744 100644 --- a/apps/web/src/pages/KeyAnalytics.tsx +++ b/apps/web/src/pages/KeyAnalytics.tsx @@ -1,6 +1,7 @@ import { useState, useMemo, useRef, useEffect } from 'react'; import { keyAnalyticsApi } from '../api/keyAnalytics'; import type { HotKeyEntry } from '../api/keyAnalytics'; +import { useBaselineHotKeys } from '../hooks/useBaselineHotKeys'; import { extractPattern } from '@betterdb/shared'; import { usePolling } from '../hooks/usePolling'; import { useConnection } from '../hooks/useConnection'; @@ -137,25 +138,12 @@ export function KeyAnalytics() { }); // Fetch history within the selected date range for rank delta - const [baselineHotKeys, setBaselineHotKeys] = useState(null); - - useEffect(() => { - if (activeTab !== 'hot-keys' || !isHotKeyTimeFiltered) { - setBaselineHotKeys(null); - return; - } - let cancelled = false; - keyAnalyticsApi.getHotKeys({ - limit: 50, - startTime: hotKeyStartTime, - endTime: 
hotKeyEndTime, - oldest: true, - }).then(entries => { - if (cancelled) return; - setBaselineHotKeys(entries.length > 0 ? entries : null); - }).catch(() => setBaselineHotKeys(null)); - return () => { cancelled = true; }; - }, [activeTab, hotKeyStartTime, hotKeyEndTime, isHotKeyTimeFiltered]); + const { data: baselineHotKeys } = useBaselineHotKeys({ + connectionId: currentConnection?.id, + startTime: hotKeyStartTime, + endTime: hotKeyEndTime, + enabled: activeTab === 'hot-keys' && isHotKeyTimeFiltered, + }); // Deduplicate: keep only the latest snapshot per pattern (results are ordered by timestamp DESC) const patterns = useMemo(() => { @@ -733,7 +721,7 @@ export function KeyAnalytics() { {hotKeys?.map((entry: HotKeyEntry) => { - const delta = getRankDelta(entry.rank, entry.keyName, baselineHotKeys); + const delta = getRankDelta(entry.rank, entry.keyName, baselineHotKeys ?? null); const isTop10 = entry.rank <= 10; return ( (null); const [selectedCommand, setSelectedCommand] = useState(null); - const [historyData, setHistoryData] = useState([]); - const [historyLoading, setHistoryLoading] = useState(false); - const [historyError, setHistoryError] = useState(null); - const [doctorReport, setDoctorReport] = useState(); - const [doctorLoading, setDoctorLoading] = useState(true); + const { data: historyData = [], isLoading: historyLoading, error: historyErrorObj } = useLatencyHistory(selectedEvent, currentConnection?.id); + const historyError = historyErrorObj?.message ?? 
null; + const { data: doctorReport, isLoading: doctorLoading } = useLatencyDoctor(currentConnection?.id); // Time filter state — initialise from URL ?start=&end= (epoch ms) const [dateRange, setDateRange] = useState(() => { @@ -77,33 +78,18 @@ export function Latency() { }); // Stored data (with time filter) - const [storedSnapshots, setStoredSnapshots] = useState(null); - const [storedHistogramData, setStoredHistogramData] = useState | null>(null); - - useEffect(() => { - if (!isTimeFiltered) { - setStoredSnapshots(null); - setStoredHistogramData(null); - return; - } - - setStoredSnapshots(null); - setStoredHistogramData(null); - let cancelled = false; - - Promise.all([ - metricsApi.getStoredLatencySnapshots({ startTime, endTime, limit: 500 }), - metricsApi.getStoredLatencyHistograms({ startTime, endTime, limit: 1 }), - ]).then(([snapshots, histograms]) => { - if (cancelled) return; - setStoredSnapshots(snapshots); - setStoredHistogramData(histograms.length > 0 ? histograms[0].data : null); - }).catch(err => { - console.error('Failed to fetch stored latency data:', err); - }); - - return () => { cancelled = true; }; - }, [startTime, endTime, isTimeFiltered, currentConnection?.id]); + const { data: storedSnapshots } = useStoredLatencySnapshots({ + connectionId: currentConnection?.id, + startTime, + endTime, + enabled: isTimeFiltered, + }); + const { data: storedHistogramData } = useStoredLatencyHistograms({ + connectionId: currentConnection?.id, + startTime, + endTime, + enabled: isTimeFiltered, + }); // Convert stored snapshots to LatencyEvent[] shape, keeping only the latest per eventName const storedAsEvents: LatencyEvent[] | null = storedSnapshots @@ -120,25 +106,6 @@ export function Latency() { const latencyEvents = isTimeFiltered ? storedAsEvents : liveLatencyEvents; const histogramData = isTimeFiltered ? 
storedHistogramData : liveHistogramData; - useEffect(() => { - if (selectedEvent) { - setHistoryLoading(true); - setHistoryError(null); - metricsApi.getLatencyHistory(selectedEvent) - .then(setHistoryData) - .catch((err) => setHistoryError(err.message || 'Failed to fetch history')) - .finally(() => setHistoryLoading(false)); - } - }, [selectedEvent]); - - useEffect(() => { - setDoctorLoading(true); - metricsApi.getLatencyDoctor() - .then(data => setDoctorReport(data.report)) - .catch(console.error) - .finally(() => setDoctorLoading(false)); - }, [currentConnection?.id]); - const formatLatency = (latency: number) => { if (latency < 1000) return `${latency}µs`; if (latency < 1000000) return `${(latency / 1000).toFixed(2)}ms`; diff --git a/apps/web/src/pages/Settings.tsx b/apps/web/src/pages/Settings.tsx index 6c791bdc..f673c827 100644 --- a/apps/web/src/pages/Settings.tsx +++ b/apps/web/src/pages/Settings.tsx @@ -1,6 +1,7 @@ -import { useState, useEffect, useCallback } from 'react'; +import { useState, useEffect } from 'react'; import { settingsApi } from '../api/settings'; -import { agentTokensApi, TokenListItem, GeneratedToken } from '../api/agent-tokens'; +import { agentTokensApi, GeneratedToken } from '../api/agent-tokens'; +import { useMcpTokens } from '../hooks/useMcpTokens'; import { useConnection } from '../hooks/useConnection'; import { AppSettings, SettingsUpdateRequest } from '@betterdb/shared'; import { Card } from '../components/ui/card'; @@ -21,32 +22,19 @@ export function Settings({ isCloudMode = false }: { isCloudMode?: boolean }) { const [hasChanges, setHasChanges] = useState(false); // MCP Tokens state (must be before any early returns) - const [mcpTokens, setMcpTokens] = useState([]); + const { tokens: mcpTokens, invalidate: invalidateMcpTokens } = useMcpTokens( + isCloudMode && activeCategory === 'mcpTokens', + ); const [mcpTokenName, setMcpTokenName] = useState(''); const [mcpGenerating, setMcpGenerating] = useState(false); const 
[mcpGeneratedToken, setMcpGeneratedToken] = useState(null); const [mcpCopied, setMcpCopied] = useState(false); const [mcpError, setMcpError] = useState(null); - const loadMcpTokens = useCallback(async () => { - try { - const tokens = await agentTokensApi.list('mcp'); - setMcpTokens(tokens); - } catch { - // Token API not available in community mode - } - }, []); - useEffect(() => { loadSettings(); }, [currentConnection?.id]); - useEffect(() => { - if (isCloudMode && activeCategory === 'mcpTokens') { - loadMcpTokens(); - } - }, [isCloudMode, activeCategory, loadMcpTokens]); - const loadSettings = async () => { try { setLoading(true); @@ -140,7 +128,7 @@ export function Settings({ isCloudMode = false }: { isCloudMode?: boolean }) { const result = await agentTokensApi.generate(mcpTokenName.trim(), 'mcp'); setMcpGeneratedToken(result); setMcpTokenName(''); - await loadMcpTokens(); + await invalidateMcpTokens(); } catch (err) { setMcpError(err instanceof Error ? err.message : 'Failed to generate token'); } finally { @@ -151,7 +139,7 @@ export function Settings({ isCloudMode = false }: { isCloudMode?: boolean }) { const handleMcpRevoke = async (id: string) => { try { await agentTokensApi.revoke(id); - await loadMcpTokens(); + await invalidateMcpTokens(); } catch (err) { setMcpError(err instanceof Error ? 
err.message : 'Failed to revoke token'); } diff --git a/apps/web/src/pages/SlowLog.tsx b/apps/web/src/pages/SlowLog.tsx index 79505092..46364835 100644 --- a/apps/web/src/pages/SlowLog.tsx +++ b/apps/web/src/pages/SlowLog.tsx @@ -1,16 +1,19 @@ -import { useState, useMemo, useEffect } from 'react'; +import { useState, useMemo } from 'react'; import { useSearchParams } from 'react-router-dom'; import { metricsApi } from '../api/metrics'; import { usePolling } from '../hooks/usePolling'; import { useCapabilities } from '../hooks/useCapabilities'; import { useConnection } from '../hooks/useConnection'; +import { useStoredSlowLog } from '../hooks/useStoredSlowLog'; +import { useStoredCommandLog, COMMAND_LOG_PAGE_SIZE } from '../hooks/useStoredCommandLog'; +import { useStoredCommandLogPatterns } from '../hooks/useStoredCommandLogPatterns'; import { Card, CardHeader, CardTitle, CardContent } from '../components/ui/card'; import { SlowLogTable } from '../components/metrics/SlowLogTable'; import { CommandLogTable } from '../components/metrics/CommandLogTable'; import { SlowLogPatternAnalysisView } from '../components/metrics/SlowLogPatternAnalysis'; import { DateRangePicker, DateRange } from '../components/ui/date-range-picker'; import { UnavailableOverlay } from '../components/UnavailableOverlay'; -import type { CommandLogType, SlowLogEntry, CommandLogEntry } from '../types/metrics'; +import type { CommandLogType } from '../types/metrics'; function getTabFromParams(params: URLSearchParams): CommandLogType { const tab = params.get('tab'); @@ -42,7 +45,7 @@ export function SlowLog() { const [viewMode, setViewMode] = useState<'table' | 'patterns'>('table'); // Pagination state (only for stored/filtered data) - const PAGE_SIZE = 100; + const PAGE_SIZE = COMMAND_LOG_PAGE_SIZE; const [page, setPage] = useState(0); // Time filter state — initialise from URL ?start=&end= (epoch ms) @@ -94,27 +97,12 @@ export function SlowLog() { }); // Stored slow log (with time filter) - const 
[storedSlowLog, setStoredSlowLog] = useState(null); - - useEffect(() => { - if (!isTimeFiltered || hasCommandLog) { - setStoredSlowLog(null); - return; - } - - let cancelled = false; - metricsApi.getStoredSlowLog({ startTime, endTime, limit: 100 }) - .then(data => { - if (!cancelled) { - setStoredSlowLog(data); - } - }) - .catch(err => { - console.error('Failed to fetch stored slow log:', err); - }); - - return () => { cancelled = true; }; - }, [startTime, endTime, isTimeFiltered, hasCommandLog, currentConnection?.id]); + const { data: storedSlowLog } = useStoredSlowLog({ + connectionId: currentConnection?.id, + startTime, + endTime, + enabled: isTimeFiltered && !hasCommandLog, + }); // Use stored data when filtered, live data otherwise const slowLog = isTimeFiltered ? storedSlowLog : liveSlowLog; @@ -143,55 +131,21 @@ export function SlowLog() { }); // Stored command log (with time filter and pagination) - const [storedCommandLog, setStoredCommandLog] = useState<{ - slow: CommandLogEntry[] | null; - 'large-request': CommandLogEntry[] | null; - 'large-reply': CommandLogEntry[] | null; - }>({ slow: null, 'large-request': null, 'large-reply': null }); - const [hasMoreEntries, setHasMoreEntries] = useState(false); - - useEffect(() => { - if (!isTimeFiltered || !hasCommandLog) { - setStoredCommandLog({ slow: null, 'large-request': null, 'large-reply': null }); - setHasMoreEntries(false); - return; - } - - let cancelled = false; - const offset = page * PAGE_SIZE; - - // Fetch only the active tab with pagination - metricsApi.getStoredCommandLog({ - startTime, - endTime, - type: activeTab, - limit: PAGE_SIZE + 1, // Fetch one extra to check if there are more - offset, - }).then((entries) => { - if (!cancelled) { - // Check if there are more entries - const hasMore = entries.length > PAGE_SIZE; - setHasMoreEntries(hasMore); - - // Only keep PAGE_SIZE entries - const trimmedEntries = hasMore ? 
entries.slice(0, PAGE_SIZE) : entries; - - setStoredCommandLog(prev => ({ - ...prev, - [activeTab]: trimmedEntries, - })); - } - }).catch(err => { - console.error('Failed to fetch stored command log:', err); - }); - - return () => { cancelled = true; }; - }, [startTime, endTime, isTimeFiltered, hasCommandLog, activeTab, page, currentConnection?.id]); + const { data: storedCommandLogResult } = useStoredCommandLog({ + connectionId: currentConnection?.id, + startTime, + endTime, + activeTab, + page, + enabled: isTimeFiltered && hasCommandLog, + }); + const storedCommandLogEntries = storedCommandLogResult?.entries ?? null; + const hasMoreEntries = storedCommandLogResult?.hasMore ?? false; // Use stored data when filtered, live data otherwise - const commandLogSlow = isTimeFiltered ? storedCommandLog.slow : liveCommandLogSlow; - const commandLogLargeRequest = isTimeFiltered ? storedCommandLog['large-request'] : liveCommandLogLargeRequest; - const commandLogLargeReply = isTimeFiltered ? storedCommandLog['large-reply'] : liveCommandLogLargeReply; + const commandLogSlow = isTimeFiltered ? (activeTab === 'slow' ? storedCommandLogEntries : null) : liveCommandLogSlow; + const commandLogLargeRequest = isTimeFiltered ? (activeTab === 'large-request' ? storedCommandLogEntries : null) : liveCommandLogLargeRequest; + const commandLogLargeReply = isTimeFiltered ? (activeTab === 'large-reply' ? 
storedCommandLogEntries : null) : liveCommandLogLargeReply; // Pattern analysis (less frequent polling since it's analytical) // Live pattern analysis (no time filter) @@ -211,27 +165,13 @@ export function SlowLog() { }); // Stored pattern analysis (with time filter) - const [storedCommandLogPatternAnalysis, setStoredCommandLogPatternAnalysis] = useState(null); - - useEffect(() => { - if (!isTimeFiltered || !hasCommandLog || viewMode !== 'patterns') { - setStoredCommandLogPatternAnalysis(null); - return; - } - - let cancelled = false; - metricsApi.getStoredCommandLogPatternAnalysis({ startTime, endTime, type: activeTab, limit: 500 }) - .then(data => { - if (!cancelled) { - setStoredCommandLogPatternAnalysis(data); - } - }) - .catch(err => { - console.error('Failed to fetch stored command log pattern analysis:', err); - }); - - return () => { cancelled = true; }; - }, [startTime, endTime, isTimeFiltered, hasCommandLog, viewMode, activeTab, currentConnection?.id]); + const { data: storedCommandLogPatternAnalysis } = useStoredCommandLogPatterns({ + connectionId: currentConnection?.id, + startTime, + endTime, + activeTab, + enabled: isTimeFiltered && hasCommandLog && viewMode === 'patterns', + }); // Use stored data when filtered, live data otherwise const slowLogPatternAnalysis = liveSlowLogPatternAnalysis; diff --git a/apps/web/src/pages/ThroughputForecasting.tsx b/apps/web/src/pages/ThroughputForecasting.tsx index acf4cc03..3fa73325 100644 --- a/apps/web/src/pages/ThroughputForecasting.tsx +++ b/apps/web/src/pages/ThroughputForecasting.tsx @@ -1,12 +1,8 @@ -import { useCallback, useEffect, useRef, useState } from 'react'; +import { useRef, useState } from 'react'; +import { useQuery, useQueryClient } from '@tanstack/react-query'; import { useConnection } from '../hooks/useConnection'; -import { usePolling } from '../hooks/usePolling'; import { metricsApi } from '../api/metrics'; -import type { - ThroughputForecast, - ThroughputSettings, - ThroughputSettingsUpdate, 
-} from '@betterdb/shared'; +import type { ThroughputSettingsUpdate } from '@betterdb/shared'; import { ForecastCard, formatTime, @@ -19,70 +15,64 @@ import { export function ThroughputForecasting() { const { currentConnection } = useConnection(); - const [settings, setSettings] = useState(null); + const queryClient = useQueryClient(); const [saveStatus, setSaveStatus] = useState<'idle' | 'saved' | 'error'>('idle'); const saveTimeout = useRef>(undefined); const debounceTimeout = useRef>(undefined); - const { data: forecast, refresh: refreshForecast } = usePolling({ - fetcher: (signal?: AbortSignal) => metricsApi.getThroughputForecast(signal), - interval: 30_000, - enabled: true, - refetchKey: currentConnection?.id, + const connectionId = currentConnection?.id; + + const { data: forecast } = useQuery({ + queryKey: ['throughput-forecast', connectionId], + queryFn: ({ signal }) => metricsApi.getThroughputForecast(signal), + refetchInterval: 30_000, }); - // Load settings - useEffect(() => { - metricsApi - .getThroughputSettings() - .then(setSettings) - .catch(() => {}); - }, [currentConnection?.id]); + const { data: settings } = useQuery({ + queryKey: ['throughput-settings', connectionId], + queryFn: ({ signal }) => metricsApi.getThroughputSettings(signal), + }); - // Chart data - const [chartData, setChartData] = useState>( - [], - ); - useEffect(() => { - if (!settings) return; - const now = Date.now(); - metricsApi - .getStoredMemorySnapshots({ startTime: now - settings.rollingWindowMs, limit: 1500 }) - .then((snapshots) => { - const sorted = [...snapshots].sort((a, b) => a.timestamp - b.timestamp); - setChartData( - sorted.map((s) => ({ - time: s.timestamp, - ops: s.opsPerSec, - label: formatTime(s.timestamp), - })), - ); - }) - .catch(() => {}); - }, [currentConnection?.id, forecast, settings]); + const { data: chartData = [] } = useQuery({ + queryKey: ['throughput-chart', connectionId, settings?.rollingWindowMs], + queryFn: async () => { + const now = 
Date.now(); + const snapshots = await metricsApi.getStoredMemorySnapshots({ + startTime: now - settings!.rollingWindowMs, + limit: 1500, + }); + return [...snapshots] + .sort((a, b) => a.timestamp - b.timestamp) + .map((s) => ({ time: s.timestamp, ops: s.opsPerSec, label: formatTime(s.timestamp) })); + }, + enabled: !!settings, + refetchInterval: 30_000, + }); - const updateSetting = useCallback( - (updates: ThroughputSettingsUpdate) => { - if (debounceTimeout.current) clearTimeout(debounceTimeout.current); - setSettings((prev: ThroughputSettings | null) => - prev ? { ...prev, ...updates, updatedAt: Date.now() } : prev, - ); + const updateSetting = (updates: ThroughputSettingsUpdate) => { + if (debounceTimeout.current) clearTimeout(debounceTimeout.current); - debounceTimeout.current = setTimeout(async () => { - try { - const updated = await metricsApi.updateThroughputSettings(updates); - setSettings(updated); - setSaveStatus('saved'); - void refreshForecast(); - if (saveTimeout.current) clearTimeout(saveTimeout.current); - saveTimeout.current = setTimeout(() => setSaveStatus('idle'), 2000); - } catch { - setSaveStatus('error'); - } - }, 500); - }, - [refreshForecast], - ); + // Optimistic update + queryClient.setQueryData( + ['throughput-settings', connectionId], + (prev: typeof settings) => (prev ? 
{ ...prev, ...updates, updatedAt: Date.now() } : prev), + ); + + debounceTimeout.current = setTimeout(async () => { + try { + const updated = await metricsApi.updateThroughputSettings(updates); + queryClient.setQueryData(['throughput-settings', connectionId], updated); + setSaveStatus('saved'); + await queryClient.invalidateQueries({ queryKey: ['throughput-forecast', connectionId] }); + if (saveTimeout.current) clearTimeout(saveTimeout.current); + saveTimeout.current = setTimeout(() => setSaveStatus('idle'), 2000); + } catch { + // Revert optimistic update + await queryClient.invalidateQueries({ queryKey: ['throughput-settings', connectionId] }); + setSaveStatus('error'); + } + }, 500); + }; // ── Page States ── diff --git a/apps/web/src/test/setup.ts b/apps/web/src/test/setup.ts new file mode 100644 index 00000000..bb02c60c --- /dev/null +++ b/apps/web/src/test/setup.ts @@ -0,0 +1 @@ +import '@testing-library/jest-dom/vitest'; diff --git a/apps/web/src/test/test-utils.tsx b/apps/web/src/test/test-utils.tsx new file mode 100644 index 00000000..32ecc389 --- /dev/null +++ b/apps/web/src/test/test-utils.tsx @@ -0,0 +1,42 @@ +import { QueryClient, QueryClientProvider } from '@tanstack/react-query'; +import { render, renderHook, type RenderOptions, type RenderHookOptions } from '@testing-library/react'; +import type { ReactNode } from 'react'; + +export function createTestQueryClient(): QueryClient { + return new QueryClient({ + defaultOptions: { + queries: { + retry: false, + gcTime: 0, + }, + }, + }); +} + +export function createWrapper(queryClient?: QueryClient) { + const client = queryClient ?? 
createTestQueryClient(); + return function Wrapper({ children }: { children: ReactNode }) { + return {children}; + }; +} + +export function renderWithQuery(ui: React.ReactElement, options?: Omit) { + const queryClient = createTestQueryClient(); + return { + ...render(ui, { wrapper: createWrapper(queryClient), ...options }), + queryClient, + }; +} + +export function renderHookWithQuery( + hook: () => T, + options?: Omit, 'wrapper'>, +) { + const queryClient = createTestQueryClient(); + return { + ...renderHook(hook, { wrapper: createWrapper(queryClient), ...options }), + queryClient, + }; +} + +export { waitFor, screen, act } from '@testing-library/react'; diff --git a/apps/web/vitest.config.ts b/apps/web/vitest.config.ts new file mode 100644 index 00000000..ee58b30b --- /dev/null +++ b/apps/web/vitest.config.ts @@ -0,0 +1,18 @@ +import { defineConfig } from 'vitest/config'; +import react from '@vitejs/plugin-react'; +import path from 'path'; + +export default defineConfig({ + plugins: [react()], + resolve: { + alias: { + '@betterdb/shared': path.resolve(__dirname, '../../packages/shared/src/index.ts'), + '@betterdb/shared/license': path.resolve(__dirname, '../../packages/shared/src/license/index.ts'), + }, + }, + test: { + environment: 'happy-dom', + setupFiles: ['./src/test/setup.ts'], + globals: true, + }, +}); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index d159172e..02266d9f 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -210,6 +210,9 @@ importers: '@radix-ui/react-tabs': specifier: ^1.1.13 version: 1.1.13(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + '@tanstack/react-query': + specifier: ^5.95.2 + version: 5.95.2(react@19.2.4) class-variance-authority: specifier: ^0.7.1 version: 0.7.1 @@ -259,6 +262,12 @@ importers: '@tailwindcss/vite': specifier: ^4.2.2 version: 4.2.2(vite@8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) + 
'@testing-library/jest-dom': + specifier: ^6.9.1 + version: 6.9.1 + '@testing-library/react': + specifier: ^16.3.2 + version: 16.3.2(@testing-library/dom@10.4.1)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) '@types/d3': specifier: ^7.4.3 version: 7.4.3 @@ -286,6 +295,9 @@ importers: globals: specifier: ^17.4.0 version: 17.4.0 + happy-dom: + specifier: ^20.8.9 + version: 20.8.9 react-grab: specifier: ^0.1.28 version: 0.1.29(@types/react@19.2.14)(react@19.2.4) @@ -303,7 +315,7 @@ importers: version: 8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2) vitest: specifier: ^4.1.1 - version: 4.1.1(@opentelemetry/api@1.9.0)(@types/node@22.19.15)(vite@8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) + version: 4.1.1(@opentelemetry/api@1.9.0)(@types/node@22.19.15)(happy-dom@20.8.9)(vite@8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) packages/agent: dependencies: @@ -403,7 +415,7 @@ importers: version: 5.9.3 vitest: specifier: ^4.1.1 - version: 4.1.1(@opentelemetry/api@1.9.0)(@types/node@22.19.15)(vite@8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) + version: 4.1.1(@opentelemetry/api@1.9.0)(@types/node@22.19.15)(happy-dom@20.8.9)(vite@8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) packages/shared: devDependencies: @@ -485,10 +497,13 @@ importers: version: 5.9.3 vitest: specifier: ^4.1.1 - version: 4.1.1(@opentelemetry/api@1.9.0)(@types/node@22.19.15)(vite@8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) + version: 4.1.1(@opentelemetry/api@1.9.0)(@types/node@22.19.15)(happy-dom@20.8.9)(vite@8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) packages: + '@adobe/css-tools@4.4.4': + resolution: {integrity: 
sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==} + '@ai-sdk/gateway@3.0.80': resolution: {integrity: sha512-uM7kpZB5l977lW7+2X1+klBUxIZQ78+1a9jHlaHFEzcOcmmslTl3sdP0QqfuuBcO0YBM2gwOiqVdp8i4TRQYcw==} engines: {node: '>=18'} @@ -693,6 +708,10 @@ packages: peerDependencies: '@babel/core': ^7.0.0-0 + '@babel/runtime@7.29.2': + resolution: {integrity: sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g==} + engines: {node: '>=6.9.0'} + '@babel/template@7.27.2': resolution: {integrity: sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==} engines: {node: '>=6.9.0'} @@ -2590,6 +2609,37 @@ packages: peerDependencies: vite: ^5.2.0 || ^6 || ^7 || ^8 + '@tanstack/query-core@5.95.2': + resolution: {integrity: sha512-o4T8vZHZET4Bib3jZ/tCW9/7080urD4c+0/AUaYVpIqOsr7y0reBc1oX3ttNaSW5mYyvZHctiQ/UOP2PfdmFEQ==} + + '@tanstack/react-query@5.95.2': + resolution: {integrity: sha512-/wGkvLj/st5Ud1Q76KF1uFxScV7WeqN1slQx5280ycwAyYkIPGaRZAEgHxe3bjirSd5Zpwkj6zNcR4cqYni/ZA==} + peerDependencies: + react: ^18 || ^19 + + '@testing-library/dom@10.4.1': + resolution: {integrity: sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==} + engines: {node: '>=18'} + + '@testing-library/jest-dom@6.9.1': + resolution: {integrity: sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA==} + engines: {node: '>=14', npm: '>=6', yarn: '>=1'} + + '@testing-library/react@16.3.2': + resolution: {integrity: sha512-XU5/SytQM+ykqMnAnvB2umaJNIOsLF3PVv//1Ew4CTcpz0/BRyy/af40qqrt7SjKpDdT1saBMc42CUok5gaw+g==} + engines: {node: '>=18'} + peerDependencies: + '@testing-library/dom': ^10.0.0 + '@types/react': ^18.0.0 || ^19.0.0 + '@types/react-dom': ^18.0.0 || ^19.0.0 + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + 
optional: true + '@tokenizer/inflate@0.4.1': resolution: {integrity: sha512-2mAv+8pkG6GIZiF1kNg1jAjh27IDxEPKwdGul3snfztFerfPGI1LjDezZp3i7BElXompqEtPmoPx6c2wgtWsOA==} engines: {node: '>=18'} @@ -2642,6 +2692,9 @@ packages: '@tybys/wasm-util@0.10.1': resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==} + '@types/aria-query@5.0.4': + resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==} + '@types/babel__core@7.20.5': resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} @@ -2871,6 +2924,9 @@ packages: '@types/validator@13.15.10': resolution: {integrity: sha512-T8L6i7wCuyoK8A/ZeLYt1+q0ty3Zb9+qbSSvrIVitzT3YjZqkTZ40IbRsPanlB4h1QB3JVL1SYCdR6ngtFYcuA==} + '@types/whatwg-mimetype@3.0.2': + resolution: {integrity: sha512-c2AKvDT8ToxLIOUlN51gTiHXflsfIFisS4pO7pDPoKouJCESkhZnEy623gwP9laCy5lnLDAw1vAzu2vM2YLOrA==} + '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} @@ -3276,6 +3332,13 @@ packages: argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + aria-query@5.3.0: + resolution: {integrity: sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==} + + aria-query@5.3.2: + resolution: {integrity: sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==} + engines: {node: '>= 0.4'} + array-back@6.2.2: resolution: {integrity: sha512-gUAZ7HPyb4SJczXAMUXMGAvI976JoK3qEx9v1FTmeYuJj0IBiaKttG1ydtGKdkfqWkIkouke7nG8ufGy77+Cvw==} engines: {node: '>=12.17'} @@ -3728,6 +3791,9 @@ packages: resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} engines: {node: '>= 8'} + 
css.escape@1.5.1: + resolution: {integrity: sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==} + csstype@3.2.3: resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} @@ -3950,6 +4016,12 @@ packages: resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} engines: {node: '>=0.3.1'} + dom-accessibility-api@0.5.16: + resolution: {integrity: sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==} + + dom-accessibility-api@0.6.3: + resolution: {integrity: sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==} + dotenv-expand@12.0.3: resolution: {integrity: sha512-uc47g4b+4k/M/SeaW1y4OApx+mtLWl92l5LMPP0GNXctZqELk+YGgOPIIC5elYmUH4OuoK3JLhuRUYegeySiFA==} engines: {node: '>=12'} @@ -4020,6 +4092,10 @@ packages: resolution: {integrity: sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA==} engines: {node: '>=10.13.0'} + entities@7.0.1: + resolution: {integrity: sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==} + engines: {node: '>=0.12'} + error-ex@1.3.4: resolution: {integrity: sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==} @@ -4481,6 +4557,10 @@ packages: engines: {node: '>=0.4.7'} hasBin: true + happy-dom@20.8.9: + resolution: {integrity: sha512-Tz23LR9T9jOGVZm2x1EPdXqwA37G/owYMxRwU0E4miurAtFsPMQ1d2Jc2okUaSjZqAFz2oEn3FLXC5a0a+siyA==} + engines: {node: '>=20.0.0'} + has-flag@3.0.0: resolution: {integrity: sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==} engines: {node: '>=4'} @@ -4592,6 +4672,10 @@ packages: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} 
engines: {node: '>=0.8.19'} + indent-string@4.0.0: + resolution: {integrity: sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==} + engines: {node: '>=8'} + indent-string@5.0.0: resolution: {integrity: sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==} engines: {node: '>=12'} @@ -5199,6 +5283,10 @@ packages: resolution: {integrity: sha512-vtEhXh/gNjI9Yg1u4jX/0YVPMvxzHuGgCm6tC5kZyb08yjGWGnqAjGJvcXbqQR2P3MyMEFnRbpcdFS6PBcLqew==} engines: {node: '>=12'} + lz-string@1.5.0: + resolution: {integrity: sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==} + hasBin: true + magic-string@0.30.17: resolution: {integrity: sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==} @@ -5287,6 +5375,10 @@ packages: resolution: {integrity: sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==} engines: {node: '>=10'} + min-indent@1.0.1: + resolution: {integrity: sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==} + engines: {node: '>=4'} + minimatch@10.2.4: resolution: {integrity: sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==} engines: {node: 18 || 20 || >=22} @@ -5738,6 +5830,10 @@ packages: engines: {node: '>=14'} hasBin: true + pretty-format@27.5.1: + resolution: {integrity: sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==} + engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} + pretty-format@30.2.0: resolution: {integrity: sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==} engines: {node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0} @@ -5862,6 +5958,9 @@ packages: react: optional: true + react-is@17.0.2: + resolution: {integrity: 
sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==} + react-is@18.3.1: resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} @@ -5939,6 +6038,10 @@ packages: react-dom: ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 react-is: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + redent@3.0.0: + resolution: {integrity: sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==} + engines: {node: '>=8'} + redis-errors@1.2.0: resolution: {integrity: sha512-1qny3OExCf0UvUV/5wpYKf2YwPcOqXzkwKKSmKHiE6ZMQs5heeE/c8eXK+PNllPvmjgAbfnsbpkGZWy8cBpn9w==} engines: {node: '>=4'} @@ -6298,6 +6401,10 @@ packages: resolution: {integrity: sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==} engines: {node: '>=12'} + strip-indent@3.0.0: + resolution: {integrity: sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==} + engines: {node: '>=8'} + strip-json-comments@2.0.1: resolution: {integrity: sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==} engines: {node: '>=0.10.0'} @@ -6791,6 +6898,10 @@ packages: whatwg-fetch@3.6.20: resolution: {integrity: sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==} + whatwg-mimetype@3.0.0: + resolution: {integrity: sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==} + engines: {node: '>=12'} + whatwg-url@5.0.0: resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} @@ -6904,6 +7015,8 @@ packages: snapshots: + '@adobe/css-tools@4.4.4': {} + '@ai-sdk/gateway@3.0.80(zod@4.3.6)': dependencies: '@ai-sdk/provider': 3.0.8 @@ -7154,6 +7267,8 @@ snapshots: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 + '@babel/runtime@7.29.2': {} + 
'@babel/template@7.27.2': dependencies: '@babel/code-frame': 7.27.1 @@ -8802,6 +8917,43 @@ snapshots: tailwindcss: 4.2.2 vite: 8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2) + '@tanstack/query-core@5.95.2': {} + + '@tanstack/react-query@5.95.2(react@19.2.4)': + dependencies: + '@tanstack/query-core': 5.95.2 + react: 19.2.4 + + '@testing-library/dom@10.4.1': + dependencies: + '@babel/code-frame': 7.27.1 + '@babel/runtime': 7.29.2 + '@types/aria-query': 5.0.4 + aria-query: 5.3.0 + dom-accessibility-api: 0.5.16 + lz-string: 1.5.0 + picocolors: 1.1.1 + pretty-format: 27.5.1 + + '@testing-library/jest-dom@6.9.1': + dependencies: + '@adobe/css-tools': 4.4.4 + aria-query: 5.3.2 + css.escape: 1.5.1 + dom-accessibility-api: 0.6.3 + picocolors: 1.1.1 + redent: 3.0.0 + + '@testing-library/react@16.3.2(@testing-library/dom@10.4.1)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': + dependencies: + '@babel/runtime': 7.29.2 + '@testing-library/dom': 10.4.1 + react: 19.2.4 + react-dom: 19.2.4(react@19.2.4) + optionalDependencies: + '@types/react': 19.2.14 + '@types/react-dom': 19.2.3(@types/react@19.2.14) + '@tokenizer/inflate@0.4.1': dependencies: debug: 4.4.3(supports-color@5.5.0) @@ -8842,6 +8994,8 @@ snapshots: tslib: 2.8.1 optional: true + '@types/aria-query@5.0.4': {} + '@types/babel__core@7.20.5': dependencies: '@babel/parser': 7.28.5 @@ -9116,6 +9270,8 @@ snapshots: '@types/validator@13.15.10': {} + '@types/whatwg-mimetype@3.0.2': {} + '@types/ws@8.18.1': dependencies: '@types/node': 22.19.15 @@ -9543,6 +9699,12 @@ snapshots: argparse@2.0.1: {} + aria-query@5.3.0: + dependencies: + dequal: 2.0.3 + + aria-query@5.3.2: {} + array-back@6.2.2: {} array-timsort@1.0.3: {} @@ -10008,6 +10170,8 @@ snapshots: shebang-command: 2.0.0 which: 2.0.2 + css.escape@1.5.1: {} + csstype@3.2.3: {} d3-array@3.2.4: @@ -10221,6 +10385,10 @@ snapshots: diff@4.0.2: {} + 
dom-accessibility-api@0.5.16: {} + + dom-accessibility-api@0.6.3: {} + dotenv-expand@12.0.3: dependencies: dotenv: 16.6.1 @@ -10285,6 +10453,8 @@ snapshots: graceful-fs: 4.2.11 tapable: 2.3.0 + entities@7.0.1: {} + error-ex@1.3.4: dependencies: is-arrayish: 0.2.1 @@ -10875,6 +11045,18 @@ snapshots: optionalDependencies: uglify-js: 3.19.3 + happy-dom@20.8.9: + dependencies: + '@types/node': 22.19.15 + '@types/whatwg-mimetype': 3.0.2 + '@types/ws': 8.18.1 + entities: 7.0.1 + whatwg-mimetype: 3.0.0 + ws: 8.20.0 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + has-flag@3.0.0: {} has-flag@4.0.0: {} @@ -10980,6 +11162,8 @@ snapshots: imurmurhash@0.1.4: {} + indent-string@4.0.0: {} + indent-string@5.0.0: {} inherits@2.0.4: {} @@ -11731,6 +11915,8 @@ snapshots: luxon@3.7.2: {} + lz-string@1.5.0: {} + magic-string@0.30.17: dependencies: '@jridgewell/sourcemap-codec': 1.5.5 @@ -11794,6 +11980,8 @@ snapshots: mimic-response@3.1.0: {} + min-indent@1.0.1: {} + minimatch@10.2.4: dependencies: brace-expansion: 5.0.5 @@ -12212,6 +12400,12 @@ snapshots: prettier@3.8.1: {} + pretty-format@27.5.1: + dependencies: + ansi-regex: 5.0.1 + ansi-styles: 5.2.0 + react-is: 17.0.2 + pretty-format@30.2.0: dependencies: '@jest/schemas': 30.0.5 @@ -12351,6 +12545,8 @@ snapshots: transitivePeerDependencies: - '@types/react' + react-is@17.0.2: {} + react-is@18.3.1: {} react-is@19.2.0: {} @@ -12433,6 +12629,11 @@ snapshots: - '@types/react' - redux + redent@3.0.0: + dependencies: + indent-string: 4.0.0 + strip-indent: 3.0.0 + redis-errors@1.2.0: {} redis-parser@3.0.0: @@ -12814,6 +13015,10 @@ snapshots: strip-final-newline@3.0.0: {} + strip-indent@3.0.0: + dependencies: + min-indent: 1.0.1 + strip-json-comments@2.0.1: {} strip-json-comments@3.1.1: {} @@ -13243,7 +13448,7 @@ snapshots: terser: 5.44.1 yaml: 2.8.2 - vitest@4.1.1(@opentelemetry/api@1.9.0)(@types/node@22.19.15)(vite@8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)): + 
vitest@4.1.1(@opentelemetry/api@1.9.0)(@types/node@22.19.15)(happy-dom@20.8.9)(vite@8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)): dependencies: '@vitest/expect': 4.1.1 '@vitest/mocker': 4.1.1(vite@8.0.2(@types/node@22.19.15)(esbuild@0.27.4)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)) @@ -13268,6 +13473,7 @@ snapshots: optionalDependencies: '@opentelemetry/api': 1.9.0 '@types/node': 22.19.15 + happy-dom: 20.8.9 transitivePeerDependencies: - msw @@ -13358,6 +13564,8 @@ snapshots: whatwg-fetch@3.6.20: {} + whatwg-mimetype@3.0.0: {} + whatwg-url@5.0.0: dependencies: tr46: 0.0.3 diff --git a/scripts/demo/throughput-ramp.sh b/scripts/demo/throughput-ramp.sh new file mode 100755 index 00000000..d0278882 --- /dev/null +++ b/scripts/demo/throughput-ramp.sh @@ -0,0 +1,236 @@ +#!/bin/zsh + +# Simulates a gradually increasing throughput load on Valkey for testing +# the Throughput Forecasting feature. +# +# Usage: +# ./scripts/demo/throughput-ramp.sh [options] +# +# Options: +# -h, --host Valkey host (default: localhost) +# -p, --port Valkey port (default: 6380) +# -a, --auth Password (default: devpassword) +# -d, --duration Total duration in minutes (default: 60) +# -s, --start-rps Starting requests per second (default: 100) +# -e, --end-rps Ending requests per second (default: 5000) +# --pattern Load pattern: ramp|spike|wave (default: ramp) +# --grow-keys Write unique keys each tick so memory grows over time +# --value-size Value size in bytes for --grow-keys (default: 1024) +# --cleanup Remove generated keys on exit +# +# Patterns: +# ramp - Linear increase from start-rps to end-rps over duration +# spike - Steady at start-rps, then sudden jump to end-rps at 75% of duration +# wave - Oscillates between start-rps and end-rps with 10-minute period + +set -eo pipefail + +HOST="localhost" +PORT="6380" +AUTH="" +DURATION_MIN=60 +START_RPS=100 +END_RPS=5000 +PATTERN="ramp" +GROW_KEYS=false +VALUE_SIZE=1024 +CLEANUP=false 
+KEY_PREFIX="throughput_test" + +while [[ $# -gt 0 ]]; do + case $1 in + -h|--host) HOST="$2"; shift 2;; + -p|--port) PORT="$2"; shift 2;; + -a|--auth) AUTH="$2"; shift 2;; + -d|--duration) DURATION_MIN="$2"; shift 2;; + -s|--start-rps) START_RPS="$2"; shift 2;; + -e|--end-rps) END_RPS="$2"; shift 2;; + --pattern) PATTERN="$2"; shift 2;; + --grow-keys) GROW_KEYS=true; shift;; + --value-size) VALUE_SIZE="$2"; shift 2;; + --cleanup) CLEANUP=true; shift;; + *) echo "Unknown option: $1"; exit 1;; + esac +done + +USE_DOCKER=false +CLI="" +AUTH_ARGS=() +[[ -n "$AUTH" ]] && AUTH_ARGS=(-a "$AUTH") + +if command -v valkey-cli &> /dev/null; then + CLI="valkey-cli" +elif command -v redis-cli &> /dev/null; then + CLI="redis-cli" +elif docker exec betterdb-monitor-valkey valkey-cli "${AUTH_ARGS[@]}" PING > /dev/null 2>&1; then + USE_DOCKER=true + echo " Using docker exec (no local CLI found)" +else + echo "Error: No valkey-cli, redis-cli, or running Docker container found" + exit 1 +fi + +# Build CLI command as an array to preserve argument boundaries +if $USE_DOCKER; then + CLI_CMD=(docker exec -i betterdb-monitor-valkey valkey-cli "${AUTH_ARGS[@]}") + CLI_PIPE=(docker exec -i betterdb-monitor-valkey valkey-cli "${AUTH_ARGS[@]}" --pipe) +else + CLI_CMD=("$CLI" -h "$HOST" -p "$PORT" "${AUTH_ARGS[@]}") + CLI_PIPE=("$CLI" -h "$HOST" -p "$PORT" "${AUTH_ARGS[@]}" --pipe) +fi + +# Verify connection +if ! "${CLI_CMD[@]}" PING > /dev/null 2>&1; then + echo "Error: Cannot connect to Valkey" + exit 1 +fi + +DURATION_SEC=$((DURATION_MIN * 60)) +TICK_SEC=1 # Adjust load every 1 second for smoother throughput +TOTAL_TICKS=$((DURATION_SEC / TICK_SEC)) +KEY_COUNTER=0 + +cleanup() { + echo "" + echo "Stopping load generation..." + + if $CLEANUP; then + echo "Cleaning up keys..." 
+ cursor=0 + while true; do + result=$("${CLI_CMD[@]}" SCAN "$cursor" MATCH "${KEY_PREFIX}_*" COUNT 1000 2>/dev/null) + cursor=$(echo "$result" | head -1) + keys=$(echo "$result" | tail -n +2) + if [[ -n "$keys" ]]; then + echo "$keys" | xargs "${CLI_CMD[@]}" DEL > /dev/null 2>&1 + fi + [[ "$cursor" == "0" ]] && break + done + echo "Cleanup complete." + fi +} + +trap cleanup EXIT INT TERM + +get_target_rps() { + local tick=$1 + + case $PATTERN in + ramp) + # Linear interpolation from START_RPS to END_RPS + local progress + progress=$(echo "scale=4; $tick / $TOTAL_TICKS" | bc) + echo "scale=0; $START_RPS + ($END_RPS - $START_RPS) * $progress / 1" | bc + ;; + spike) + # Steady at START_RPS, jump to END_RPS at 75% duration + local threshold=$((TOTAL_TICKS * 3 / 4)) + if [[ $tick -lt $threshold ]]; then + echo "$START_RPS" + else + echo "$END_RPS" + fi + ;; + wave) + # Sinusoidal oscillation with 10-minute period + local mid=$(( (START_RPS + END_RPS) / 2 )) + local amp=$(( (END_RPS - START_RPS) / 2 )) + local period_ticks=$((600 / TICK_SEC)) # 10 min period + local angle + angle=$(echo "scale=6; 3.14159 * 2 * $tick / $period_ticks" | bc) + local sin_val + sin_val=$(echo "scale=6; s($angle)" | bc -l) + echo "scale=0; $mid + $amp * $sin_val / 1" | bc + ;; + *) + echo "$START_RPS" + ;; + esac +} + +# Pre-generate the value payload for --grow-keys mode +if $GROW_KEYS; then + VALUE_PAYLOAD=$(head -c "$VALUE_SIZE" < /dev/zero | tr '\0' 'x') +fi + +# Generate load at target RPS for one tick (TICK_SEC seconds) +# Spreads commands evenly across the tick in small batches (every 100ms) +# so Valkey's instantaneous_ops_per_sec rolling average stays accurate. 
+run_tick() {
+  local target_rps=$1
+  local total_ops=$((target_rps * TICK_SEC))
+  if ((total_ops < 1)); then total_ops=1; fi
+
+  # Split into 10 batches per second (every 100ms) so the load is steady
+  local batches_per_sec=10
+  local batch_count=$((TICK_SEC * batches_per_sec))
+  local ops_per_batch=$(( (total_ops + batch_count - 1) / batch_count ))
+  local delay=$(printf "%.3f" $(echo "scale=3; 1.0 / $batches_per_sec" | bc))
+
+  local sent=0
+  for ((b = 0; b < batch_count && sent < total_ops; b++)); do
+    local this_batch=$ops_per_batch
+    ((sent + this_batch > total_ops)) && this_batch=$((total_ops - sent))
+
+    local batch=""
+    if $GROW_KEYS; then
+      local val_len=${#VALUE_PAYLOAD}
+      for ((i = 0; i < this_batch; i++)); do
+        local key="${KEY_PREFIX}_${KEY_COUNTER}"
+        KEY_COUNTER=$((KEY_COUNTER + 1))
+        batch+="*3\r\n\$3\r\nSET\r\n\$${#key}\r\n${key}\r\n\$${val_len}\r\n${VALUE_PAYLOAD}\r\n"
+      done
+    else
+      for ((i = 0; i < this_batch; i++)); do
+        batch+="*1\r\n\$4\r\nPING\r\n"
+      done
+    fi
+    printf '%b' "$batch" | "${CLI_PIPE[@]}" > /dev/null 2>&1
+    sent=$((sent + this_batch))
+
+    sleep "$delay"
+  done
+}
+
+echo "============================================"
+echo "  Throughput Ramp - Load Generator"
+echo "============================================"
+echo ""
+echo "  Target:     $HOST:$PORT"
+echo "  Pattern:    $PATTERN"
+echo "  Duration:   ${DURATION_MIN}m"
+echo "  Start RPS:  $START_RPS"
+echo "  End RPS:    $END_RPS"
+echo "  Grow keys:  $GROW_KEYS"
+if $GROW_KEYS; then
+echo "  Value size: ${VALUE_SIZE}B"
+fi
+echo "  Cleanup:    $CLEANUP"
+echo ""
+echo "  Press Ctrl+C to stop"
+echo ""
+
+START_TIME=$(date +%s)
+
+for ((tick = 0; tick < TOTAL_TICKS; tick++)); do
+  target_rps=$(get_target_rps $tick)
+  elapsed_min=$(( (tick * TICK_SEC) / 60 ))
+  remaining_min=$(( (DURATION_SEC - tick * TICK_SEC) / 60 ))
+
+  # Progress bar (top-level loop: 'local' is only valid inside functions)
+  pct=$((tick * 100 / TOTAL_TICKS))
+  bar_len=20
+  filled=$((pct * bar_len / 100))
+  empty=$((bar_len - filled))
+  bar=$(printf '%0.s█' $(seq 1 $filled 2>/dev/null))$(printf '%0.s░' $(seq 1 $empty 2>/dev/null))
+
+  printf "\r  %s %3d%% | %3dm/%dm | %5d ops/sec | %s | %dm left " \
+    "$bar" "$pct" "$elapsed_min" "$DURATION_MIN" "$target_rps" "$PATTERN" "$remaining_min"
+
+  run_tick "$target_rps"
+done
+
+echo ""
+echo ""
+echo "Load generation complete."
+echo "Total runtime: $(( $(date +%s) - START_TIME ))s"

From 10f5bfd097301a7bbadb680b00b2a03df6948931 Mon Sep 17 00:00:00 2001
From: Petar Dzhambazov
Date: Tue, 31 Mar 2026 14:48:16 +0300
Subject: [PATCH 07/20] Generic metric forecasting (#63)

Generalize throughput forecasting into a MetricForecastingService
parameterized by MetricKind. Add metric extractors, ceiling resolvers
(with auto-detect for memory maxmemory), unified storage table, REST
endpoints, Prometheus export, and a tabbed frontend page at /forecasting.

Add tests for checkAlerts dispatch, ceiling-exceeded paths, falling/
stable trends for non-ops metrics, zero-slope behavior, connection
isolation, data sufficiency boundaries, formatter edge cases, and
validation pipe edge cases.

Add `docker-compose.test.yml` for isolated test environments using
dedicated containers and ports.

Refactor metric forecasting to use actual values for fast spike detection.
Enhance tier validation with `DEV_LICENSE_TIER` override for development.
Adjust global test setup/teardown scripts for test containers.

Aligns setting names with the generic metric forecasting feature,
replacing the old throughput-specific naming. DB column names are
unchanged to avoid requiring a migration.

Improve logging in `checkAlerts` for uninitialized services.

Add safe fallback values for hysteresis recovery during webhook dispatch.
Refactor to use `WebhookEventType` for consistency.
Update tests to validate dispatch behavior with recovery values.
--------- Co-authored-by: Kristiyan Ivanov --- apps/api/package.json | 4 +- apps/api/src/app.module.ts | 4 +- .../interfaces/storage-port.interface.ts | 18 +- .../__tests__/ceiling-resolvers.spec.ts | 100 +++ .../__tests__/metric-extractors.spec.ts | 47 ++ .../metric-forecasting.service.spec.ts | 730 ++++++++++++++++++ .../metric-kind-validation.pipe.spec.ts | 29 + .../metric-forecasting/ceiling-resolvers.ts | 23 + .../metric-forecasting/metric-extractors.ts | 11 + .../metric-forecasting.controller.ts | 40 + .../metric-forecasting.module.ts | 13 + .../metric-forecasting.service.ts} | 203 ++--- .../pipes/metric-kind-validation.pipe.ts | 16 + apps/api/src/prometheus/prometheus.module.ts | 4 +- apps/api/src/prometheus/prometheus.service.ts | 52 +- apps/api/src/settings/settings.service.ts | 12 +- .../src/storage/adapters/base-sql.adapter.ts | 6 +- .../src/storage/adapters/memory.adapter.ts | 63 +- .../src/storage/adapters/postgres.adapter.ts | 93 +-- .../src/storage/adapters/sqlite.adapter.ts | 104 +-- .../throughput-forecasting.service.spec.ts | 554 ------------- .../throughput-forecasting.controller.ts | 27 - .../throughput-forecasting.module.ts | 13 - .../__tests__/webhooks.service.spec.ts | 2 - .../webhooks/webhook-dispatcher.service.ts | 97 ++- apps/api/test/global-setup.ts | 65 +- apps/api/test/global-teardown.ts | 16 +- apps/api/test/setup-env.ts | 4 +- apps/web/src/App.tsx | 20 +- apps/web/src/api/metric-forecasting.ts | 21 + apps/web/src/api/metrics.ts | 14 - .../MetricChart.tsx} | 90 +-- .../metric-forecasting/MetricDisabled.tsx | 11 + .../metric-forecasting/MetricForecastCard.tsx | 61 ++ .../MetricInsufficientData.tsx | 22 + .../metric-forecasting/MetricLoading.tsx | 10 + .../MetricSettingsPanel.tsx} | 57 +- .../__tests__/formatters.test.ts | 69 ++ .../pages/metric-forecasting/formatters.ts | 59 ++ .../pages/metric-forecasting/index.ts | 7 + ...hroughput-forecasting-settings-section.tsx | 64 -- .../pages/throughput-forecasting/Disabled.tsx | 33 - 
.../throughput-forecasting/ForecastCard.tsx | 51 -- .../InsufficientData.tsx | 14 - .../pages/throughput-forecasting/Loading.tsx | 8 - .../pages/throughput-forecasting/index.ts | 7 - .../pages/throughput-forecasting/utils.ts | 24 - .../src/components/webhooks/WebhookForm.tsx | 2 +- apps/web/src/pages/MetricForecasting.tsx | 155 ++++ apps/web/src/pages/Settings.tsx | 29 +- apps/web/src/pages/ThroughputForecasting.tsx | 102 --- .../pages/metric-forecasting-extractors.ts | 11 + docker-compose.test.yml | 56 ++ packages/shared/src/index.ts | 2 +- .../src/types/metric-forecasting.types.ts | 87 +++ packages/shared/src/types/settings.types.ts | 6 +- packages/shared/src/types/throughput.types.ts | 32 - packages/shared/src/webhooks/types.ts | 18 +- .../__tests__/throughput-limit.spec.ts | 29 +- .../webhook-pro/webhook-events-pro.service.ts | 46 +- scripts/demo/throughput-ramp.mjs | 264 +++++++ scripts/demo/throughput-ramp.sh | 236 ------ 62 files changed, 2415 insertions(+), 1652 deletions(-) create mode 100644 apps/api/src/metric-forecasting/__tests__/ceiling-resolvers.spec.ts create mode 100644 apps/api/src/metric-forecasting/__tests__/metric-extractors.spec.ts create mode 100644 apps/api/src/metric-forecasting/__tests__/metric-forecasting.service.spec.ts create mode 100644 apps/api/src/metric-forecasting/__tests__/metric-kind-validation.pipe.spec.ts create mode 100644 apps/api/src/metric-forecasting/ceiling-resolvers.ts create mode 100644 apps/api/src/metric-forecasting/metric-extractors.ts create mode 100644 apps/api/src/metric-forecasting/metric-forecasting.controller.ts create mode 100644 apps/api/src/metric-forecasting/metric-forecasting.module.ts rename apps/api/src/{throughput-forecasting/throughput-forecasting.service.ts => metric-forecasting/metric-forecasting.service.ts} (54%) create mode 100644 apps/api/src/metric-forecasting/pipes/metric-kind-validation.pipe.ts delete mode 100644 
apps/api/src/throughput-forecasting/__tests__/throughput-forecasting.service.spec.ts delete mode 100644 apps/api/src/throughput-forecasting/throughput-forecasting.controller.ts delete mode 100644 apps/api/src/throughput-forecasting/throughput-forecasting.module.ts create mode 100644 apps/web/src/api/metric-forecasting.ts rename apps/web/src/components/pages/{throughput-forecasting/ThroughputChart.tsx => metric-forecasting/MetricChart.tsx} (53%) create mode 100644 apps/web/src/components/pages/metric-forecasting/MetricDisabled.tsx create mode 100644 apps/web/src/components/pages/metric-forecasting/MetricForecastCard.tsx create mode 100644 apps/web/src/components/pages/metric-forecasting/MetricInsufficientData.tsx create mode 100644 apps/web/src/components/pages/metric-forecasting/MetricLoading.tsx rename apps/web/src/components/pages/{throughput-forecasting/SettingsPanel.tsx => metric-forecasting/MetricSettingsPanel.tsx} (50%) create mode 100644 apps/web/src/components/pages/metric-forecasting/__tests__/formatters.test.ts create mode 100644 apps/web/src/components/pages/metric-forecasting/formatters.ts create mode 100644 apps/web/src/components/pages/metric-forecasting/index.ts delete mode 100644 apps/web/src/components/pages/settings/throughput-forecasting-settings-section.tsx delete mode 100644 apps/web/src/components/pages/throughput-forecasting/Disabled.tsx delete mode 100644 apps/web/src/components/pages/throughput-forecasting/ForecastCard.tsx delete mode 100644 apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx delete mode 100644 apps/web/src/components/pages/throughput-forecasting/Loading.tsx delete mode 100644 apps/web/src/components/pages/throughput-forecasting/index.ts delete mode 100644 apps/web/src/components/pages/throughput-forecasting/utils.ts create mode 100644 apps/web/src/pages/MetricForecasting.tsx delete mode 100644 apps/web/src/pages/ThroughputForecasting.tsx create mode 100644 
apps/web/src/pages/metric-forecasting-extractors.ts create mode 100644 docker-compose.test.yml create mode 100644 packages/shared/src/types/metric-forecasting.types.ts delete mode 100644 packages/shared/src/types/throughput.types.ts create mode 100755 scripts/demo/throughput-ramp.mjs delete mode 100755 scripts/demo/throughput-ramp.sh diff --git a/apps/api/package.json b/apps/api/package.json index c11d17c8..299a3ce4 100644 --- a/apps/api/package.json +++ b/apps/api/package.json @@ -23,8 +23,8 @@ "test:integration:audit": "jest test/api-audit.e2e-spec.ts", "test:integration:client-analytics": "jest test/api-client-analytics.e2e-spec.ts", "test:integration:full": "jest test/api-full-flow.e2e-spec.ts", - "test:integration:redis": "TEST_DB_PORT=6382 jest test/database-compatibility.e2e-spec.ts", - "test:integration:valkey": "TEST_DB_PORT=6380 jest --testRegex='.e2e-spec.ts$'", + "test:integration:redis": "TEST_DB_PORT=6392 jest test/database-compatibility.e2e-spec.ts", + "test:integration:valkey": "TEST_DB_PORT=6390 jest --testRegex='.e2e-spec.ts$'", "test:cluster": "jest test/api-cluster.e2e-spec.ts", "test:cluster:unit": "jest src/cluster/*.spec.ts", "test:integration:cluster": "TEST_DB_HOST=localhost TEST_DB_PORT=7001 jest test/api-cluster.e2e-spec.ts", diff --git a/apps/api/src/app.module.ts b/apps/api/src/app.module.ts index 6b2bbfd5..260e9391 100644 --- a/apps/api/src/app.module.ts +++ b/apps/api/src/app.module.ts @@ -18,7 +18,7 @@ import { TelemetryModule } from './telemetry/telemetry.module'; import { VectorSearchModule } from './vector-search/vector-search.module'; import { CloudAuthModule } from './auth/cloud-auth.module'; import { McpModule } from './mcp/mcp.module'; -import { ThroughputForecastingModule } from './throughput-forecasting/throughput-forecasting.module'; +import { MetricForecastingModule } from './metric-forecasting/metric-forecasting.module'; let AiModule: any = null; let LicenseModule: any = null; @@ -120,7 +120,7 @@ const baseImports = [ 
WebhooksModule, McpModule, VectorSearchModule, - ThroughputForecastingModule, + MetricForecastingModule, ]; const proprietaryImports = [ diff --git a/apps/api/src/common/interfaces/storage-port.interface.ts b/apps/api/src/common/interfaces/storage-port.interface.ts index 06e38842..0227ab00 100644 --- a/apps/api/src/common/interfaces/storage-port.interface.ts +++ b/apps/api/src/common/interfaces/storage-port.interface.ts @@ -29,7 +29,7 @@ export type { VectorIndexSnapshot, VectorIndexSnapshotQueryOptions, } from '@betterdb/shared'; -export type { ThroughputSettings } from '@betterdb/shared'; +export type { MetricForecastSettings, MetricKind } from '@betterdb/shared'; import type { AppSettings, AuditQueryOptions, @@ -46,7 +46,8 @@ import type { SettingsUpdateRequest, StoredAclEntry, StoredClientSnapshot, - ThroughputSettings, + MetricForecastSettings, + MetricKind, VectorIndexSnapshot, VectorIndexSnapshotQueryOptions, Webhook, @@ -439,9 +440,12 @@ export interface StoragePort { revokeAgentToken(id: string): Promise; updateAgentTokenLastUsed(id: string): Promise; - // Throughput Forecasting Settings - getThroughputSettings(connectionId: string): Promise; - saveThroughputSettings(settings: ThroughputSettings): Promise; - deleteThroughputSettings(connectionId: string): Promise; - getActiveThroughputSettings(): Promise; + // Metric Forecasting Settings + getMetricForecastSettings( + connectionId: string, + metricKind: MetricKind, + ): Promise; + saveMetricForecastSettings(settings: MetricForecastSettings): Promise; + deleteMetricForecastSettings(connectionId: string, metricKind: MetricKind): Promise; + getActiveMetricForecastSettings(): Promise; } diff --git a/apps/api/src/metric-forecasting/__tests__/ceiling-resolvers.spec.ts b/apps/api/src/metric-forecasting/__tests__/ceiling-resolvers.spec.ts new file mode 100644 index 00000000..7d1988a5 --- /dev/null +++ b/apps/api/src/metric-forecasting/__tests__/ceiling-resolvers.spec.ts @@ -0,0 +1,100 @@ +import { 
CEILING_RESOLVERS } from '../ceiling-resolvers'; +import type { MetricForecastSettings } from '@betterdb/shared'; +import type { StoredMemorySnapshot } from '../../common/interfaces/storage-port.interface'; + +function makeSettings(overrides?: Partial): MetricForecastSettings { + return { + connectionId: 'conn-1', + metricKind: 'opsPerSec', + enabled: true, + ceiling: null, + rollingWindowMs: 21600000, + alertThresholdMs: 7200000, + updatedAt: Date.now(), + ...overrides, + }; +} + +function makeSnapshot(overrides?: Partial): StoredMemorySnapshot { + return { + id: 'snap-1', + timestamp: Date.now(), + usedMemory: 50_000_000, + usedMemoryRss: 60_000_000, + usedMemoryPeak: 70_000_000, + memFragmentationRatio: 1.2, + maxmemory: 0, + allocatorFragRatio: 1.0, + opsPerSec: 10_000, + cpuSys: 1.0, + cpuUser: 2.0, + ioThreadedReads: 0, + ioThreadedWrites: 0, + connectionId: 'conn-1', + ...overrides, + }; +} + +describe('CEILING_RESOLVERS', () => { + describe('opsPerSec', () => { + it('returns user-configured ceiling', () => { + expect(CEILING_RESOLVERS.opsPerSec(makeSettings({ ceiling: 80_000 }))).toBe(80_000); + }); + + it('returns null when no ceiling configured', () => { + expect(CEILING_RESOLVERS.opsPerSec(makeSettings({ ceiling: null }))).toBeNull(); + }); + }); + + describe('usedMemory', () => { + it('returns user-configured ceiling when set', () => { + const settings = makeSettings({ metricKind: 'usedMemory', ceiling: 200_000_000 }); + expect(CEILING_RESOLVERS.usedMemory(settings, makeSnapshot())).toBe(200_000_000); + }); + + it('auto-detects from maxmemory when ceiling is null', () => { + const settings = makeSettings({ metricKind: 'usedMemory', ceiling: null }); + const snapshot = makeSnapshot({ maxmemory: 100_000_000 }); + expect(CEILING_RESOLVERS.usedMemory(settings, snapshot)).toBe(100_000_000); + }); + + it('returns null when ceiling is null and maxmemory is 0', () => { + const settings = makeSettings({ metricKind: 'usedMemory', ceiling: null }); + const 
snapshot = makeSnapshot({ maxmemory: 0 }); + expect(CEILING_RESOLVERS.usedMemory(settings, snapshot)).toBeNull(); + }); + + it('returns null when ceiling is null and no snapshot', () => { + const settings = makeSettings({ metricKind: 'usedMemory', ceiling: null }); + expect(CEILING_RESOLVERS.usedMemory(settings)).toBeNull(); + }); + }); + + describe('cpuTotal', () => { + it('returns user-configured ceiling', () => { + expect(CEILING_RESOLVERS.cpuTotal(makeSettings({ ceiling: 80 }))).toBe(80); + }); + + it('defaults to 100 when no ceiling configured', () => { + expect(CEILING_RESOLVERS.cpuTotal(makeSettings({ ceiling: null }))).toBe(100); + }); + + it('ceiling of 0 returns 0 (not default)', () => { + expect(CEILING_RESOLVERS.cpuTotal(makeSettings({ ceiling: 0 }))).toBe(0); + }); + }); + + describe('memFragmentation', () => { + it('returns user-configured ceiling', () => { + expect(CEILING_RESOLVERS.memFragmentation(makeSettings({ ceiling: 2.0 }))).toBe(2.0); + }); + + it('defaults to 1.5 when no ceiling configured', () => { + expect(CEILING_RESOLVERS.memFragmentation(makeSettings({ ceiling: null }))).toBe(1.5); + }); + + it('ceiling of 0 returns 0 (not default)', () => { + expect(CEILING_RESOLVERS.memFragmentation(makeSettings({ ceiling: 0 }))).toBe(0); + }); + }); +}); diff --git a/apps/api/src/metric-forecasting/__tests__/metric-extractors.spec.ts b/apps/api/src/metric-forecasting/__tests__/metric-extractors.spec.ts new file mode 100644 index 00000000..01b29e1c --- /dev/null +++ b/apps/api/src/metric-forecasting/__tests__/metric-extractors.spec.ts @@ -0,0 +1,47 @@ +import { METRIC_EXTRACTORS } from '../metric-extractors'; +import type { StoredMemorySnapshot } from '../../common/interfaces/storage-port.interface'; + +const snapshot: StoredMemorySnapshot = { + id: 'snap-1', + timestamp: Date.now(), + usedMemory: 50_000_000, + usedMemoryRss: 60_000_000, + usedMemoryPeak: 70_000_000, + memFragmentationRatio: 1.35, + maxmemory: 100_000_000, + allocatorFragRatio: 1.1, + 
opsPerSec: 12_345, + cpuSys: 3.5, + cpuUser: 7.2, + ioThreadedReads: 100, + ioThreadedWrites: 50, + connectionId: 'conn-1', +}; + +describe('METRIC_EXTRACTORS', () => { + it('opsPerSec extracts opsPerSec', () => { + expect(METRIC_EXTRACTORS.opsPerSec(snapshot)).toBe(12_345); + }); + + it('usedMemory extracts usedMemory', () => { + expect(METRIC_EXTRACTORS.usedMemory(snapshot)).toBe(50_000_000); + }); + + it('cpuTotal sums cpuSys + cpuUser', () => { + expect(METRIC_EXTRACTORS.cpuTotal(snapshot)).toBeCloseTo(10.7); + }); + + it('memFragmentation extracts memFragmentationRatio', () => { + expect(METRIC_EXTRACTORS.memFragmentation(snapshot)).toBe(1.35); + }); + + it('cpuTotal handles zero values', () => { + const s = { ...snapshot, cpuSys: 0, cpuUser: 0 }; + expect(METRIC_EXTRACTORS.cpuTotal(s)).toBe(0); + }); + + it('opsPerSec handles zero', () => { + const s = { ...snapshot, opsPerSec: 0 }; + expect(METRIC_EXTRACTORS.opsPerSec(s)).toBe(0); + }); +}); diff --git a/apps/api/src/metric-forecasting/__tests__/metric-forecasting.service.spec.ts b/apps/api/src/metric-forecasting/__tests__/metric-forecasting.service.spec.ts new file mode 100644 index 00000000..fb39b618 --- /dev/null +++ b/apps/api/src/metric-forecasting/__tests__/metric-forecasting.service.spec.ts @@ -0,0 +1,730 @@ +import { Test, TestingModule } from '@nestjs/testing'; +import { MemoryAdapter } from '../../storage/adapters/memory.adapter'; +import { MetricForecastingService } from '../metric-forecasting.service'; +import { SettingsService } from '../../settings/settings.service'; +import { ConnectionRegistry } from '../../connections/connection-registry.service'; +import type { AppSettings, MetricForecastSettings, MetricKind } from '@betterdb/shared'; +import type { StoredMemorySnapshot } from '../../common/interfaces/storage-port.interface'; + +// ── Test Helpers ── + +function mockGlobalSettings(overrides?: Partial): AppSettings { + return { + id: 1, + auditPollIntervalMs: 60000, + 
clientAnalyticsPollIntervalMs: 60000, + anomalyPollIntervalMs: 1000, + anomalyCacheTtlMs: 3600000, + anomalyPrometheusIntervalMs: 30000, + metricForecastingEnabled: true, + metricForecastingDefaultRollingWindowMs: 21600000, + metricForecastingDefaultAlertThresholdMs: 7200000, + createdAt: Date.now(), + updatedAt: Date.now(), + ...overrides, + }; +} + +function makeSettings(overrides?: Partial): MetricForecastSettings { + return { + connectionId: 'conn-1', + metricKind: 'opsPerSec', + enabled: true, + ceiling: null, + rollingWindowMs: 21600000, + alertThresholdMs: 7200000, + updatedAt: Date.now(), + ...overrides, + }; +} + +function generateSnapshots(opts: { + count: number; + startTime: number; + intervalMs: number; + startOps?: number; + endOps?: number; + startMemory?: number; + endMemory?: number; + startCpuSys?: number; + endCpuSys?: number; + startCpuUser?: number; + endCpuUser?: number; + startFragRatio?: number; + endFragRatio?: number; + maxmemory?: number; + connectionId?: string; +}): StoredMemorySnapshot[] { + const snapshots: StoredMemorySnapshot[] = []; + for (let i = 0; i < opts.count; i++) { + const t = opts.count > 1 ? i / (opts.count - 1) : 0; + snapshots.push({ + id: `snap-${i}`, + timestamp: opts.startTime + i * opts.intervalMs, + usedMemory: Math.round((opts.startMemory ?? 1_000_000) + t * ((opts.endMemory ?? 1_000_000) - (opts.startMemory ?? 1_000_000))), + usedMemoryRss: 1_200_000, + usedMemoryPeak: 1_500_000, + memFragmentationRatio: (opts.startFragRatio ?? 1.2) + t * ((opts.endFragRatio ?? 1.2) - (opts.startFragRatio ?? 1.2)), + maxmemory: opts.maxmemory ?? 0, + allocatorFragRatio: 1.0, + opsPerSec: Math.round((opts.startOps ?? 10_000) + t * ((opts.endOps ?? 10_000) - (opts.startOps ?? 10_000))), + cpuSys: (opts.startCpuSys ?? 1.0) + t * ((opts.endCpuSys ?? 1.0) - (opts.startCpuSys ?? 1.0)), + cpuUser: (opts.startCpuUser ?? 2.0) + t * ((opts.endCpuUser ?? 2.0) - (opts.startCpuUser ?? 
2.0)), + ioThreadedReads: 0, + ioThreadedWrites: 0, + connectionId: opts.connectionId ?? 'conn-1', + }); + } + return snapshots; +} + +// ── Test Suite ── + +describe('MetricForecastingService', () => { + let service: MetricForecastingService; + let storage: MemoryAdapter; + let settingsService: { getCachedSettings: jest.Mock }; + + beforeEach(async () => { + storage = new MemoryAdapter(); + await storage.initialize(); + + settingsService = { + getCachedSettings: jest.fn().mockReturnValue(mockGlobalSettings()), + }; + + const module: TestingModule = await Test.createTestingModule({ + providers: [ + MetricForecastingService, + { provide: 'STORAGE_CLIENT', useValue: storage }, + { provide: SettingsService, useValue: settingsService }, + { + provide: ConnectionRegistry, + useValue: { list: jest.fn().mockReturnValue([]), getConfig: jest.fn() }, + }, + ], + }).compile(); + + service = module.get(MetricForecastingService); + }); + + // ── Storage Round-Trip ── + + describe('storage round-trip', () => { + it('saves and retrieves metric forecast settings', async () => { + const settings = makeSettings({ ceiling: 80_000 }); + await storage.saveMetricForecastSettings(settings); + const result = await storage.getMetricForecastSettings('conn-1', 'opsPerSec'); + expect(result).not.toBeNull(); + expect(result!.connectionId).toBe('conn-1'); + expect(result!.metricKind).toBe('opsPerSec'); + expect(result!.ceiling).toBe(80_000); + }); + + it('returns null for missing settings', async () => { + const result = await storage.getMetricForecastSettings('conn-unknown', 'opsPerSec'); + expect(result).toBeNull(); + }); + + it('upsert overwrites existing settings', async () => { + await storage.saveMetricForecastSettings(makeSettings({ ceiling: 50_000 })); + await storage.saveMetricForecastSettings(makeSettings({ ceiling: 90_000 })); + const result = await storage.getMetricForecastSettings('conn-1', 'opsPerSec'); + expect(result!.ceiling).toBe(90_000); + }); + + it('different metric kinds 
are independent', async () => { + await storage.saveMetricForecastSettings(makeSettings({ metricKind: 'opsPerSec', ceiling: 80_000 })); + await storage.saveMetricForecastSettings(makeSettings({ metricKind: 'usedMemory', ceiling: 200_000_000 })); + const ops = await storage.getMetricForecastSettings('conn-1', 'opsPerSec'); + const mem = await storage.getMetricForecastSettings('conn-1', 'usedMemory'); + expect(ops!.ceiling).toBe(80_000); + expect(mem!.ceiling).toBe(200_000_000); + }); + + it('getActiveMetricForecastSettings filters correctly', async () => { + await storage.saveMetricForecastSettings( + makeSettings({ connectionId: 'a', metricKind: 'opsPerSec', enabled: true, ceiling: 80_000 }), + ); + await storage.saveMetricForecastSettings( + makeSettings({ connectionId: 'b', metricKind: 'usedMemory', enabled: true, ceiling: null }), + ); + await storage.saveMetricForecastSettings( + makeSettings({ connectionId: 'c', metricKind: 'cpuTotal', enabled: false, ceiling: 80 }), + ); + const active = await storage.getActiveMetricForecastSettings(); + expect(active).toHaveLength(1); + expect(active[0].connectionId).toBe('a'); + }); + }); + + // ── opsPerSec (same behavior as throughput) ── + + describe('opsPerSec: rising trend, no ceiling', () => { + it('returns rising trend with correct direction', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startOps: 10_000, endOps: 20_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'opsPerSec'); + + expect(forecast.metricKind).toBe('opsPerSec'); + expect(forecast.mode).toBe('trend'); + expect(forecast.trendDirection).toBe('rising'); + expect(forecast.growthPercent).toBeGreaterThan(5); + expect(forecast.ceiling).toBeNull(); + expect(forecast.currentValue).toBeGreaterThanOrEqual(19_000); + expect(forecast.insufficientData).toBe(false); + }); + }); + + 
describe('opsPerSec: rising trend with ceiling', () => { + it('returns forecast with time-to-limit', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startOps: 40_000, endOps: 50_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + await storage.saveMetricForecastSettings(makeSettings({ ceiling: 80_000 })); + + const forecast = await service.getForecast('conn-1', 'opsPerSec'); + + expect(forecast.mode).toBe('forecast'); + expect(forecast.timeToLimitMs).toBeGreaterThan(0); + expect(forecast.ceiling).toBe(80_000); + }); + }); + + // ── usedMemory ── + + describe('usedMemory: rising trend with auto-detected ceiling', () => { + it('auto-detects ceiling from maxmemory', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startMemory: 50_000_000, endMemory: 80_000_000, + maxmemory: 100_000_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'usedMemory'); + + expect(forecast.metricKind).toBe('usedMemory'); + expect(forecast.mode).toBe('forecast'); + expect(forecast.ceiling).toBe(100_000_000); + expect(forecast.timeToLimitMs).toBeGreaterThan(0); + }); + + it('uses trend mode when maxmemory is 0 and no ceiling set', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startMemory: 50_000_000, endMemory: 80_000_000, + maxmemory: 0, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'usedMemory'); + + expect(forecast.mode).toBe('trend'); + expect(forecast.ceiling).toBeNull(); + }); + }); + + // ── cpuTotal ── + + describe('cpuTotal: rising trend with default ceiling', () => { + it('uses default ceiling of 100%', async () => { + const 
now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startCpuSys: 10, endCpuSys: 20, startCpuUser: 20, endCpuUser: 40, + connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'cpuTotal'); + + expect(forecast.metricKind).toBe('cpuTotal'); + expect(forecast.mode).toBe('forecast'); + expect(forecast.ceiling).toBe(100); + expect(forecast.trendDirection).toBe('rising'); + expect(forecast.timeToLimitMs).toBeGreaterThan(0); + }); + }); + + // ── memFragmentation ── + + describe('memFragmentation: rising trend with default ceiling', () => { + it('uses default ceiling of 1.5', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startFragRatio: 1.0, endFragRatio: 1.3, + connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'memFragmentation'); + + expect(forecast.metricKind).toBe('memFragmentation'); + expect(forecast.mode).toBe('forecast'); + expect(forecast.ceiling).toBe(1.5); + expect(forecast.trendDirection).toBe('rising'); + expect(forecast.timeToLimitMs).toBeGreaterThan(0); + }); + }); + + // ── Insufficient data ── + + describe('insufficient data', () => { + it.each(['opsPerSec', 'usedMemory', 'cpuTotal', 'memFragmentation'])( + '%s: no snapshots returns insufficient data', + async (metricKind) => { + const forecast = await service.getForecast('conn-1', metricKind); + expect(forecast.insufficientData).toBe(true); + expect(forecast.metricKind).toBe(metricKind); + }, + ); + }); + + // ── Disabled ── + + describe('disabled', () => { + it('globally disabled returns enabled=false', async () => { + settingsService.getCachedSettings.mockReturnValue( + mockGlobalSettings({ metricForecastingEnabled: false }), + ); + const forecast = await service.getForecast('conn-1', 
'usedMemory'); + expect(forecast.enabled).toBe(false); + }); + + it('per-connection disabled returns enabled=false', async () => { + await storage.saveMetricForecastSettings( + makeSettings({ metricKind: 'cpuTotal', enabled: false }), + ); + const forecast = await service.getForecast('conn-1', 'cpuTotal'); + expect(forecast.enabled).toBe(false); + }); + }); + + // ── Settings management ── + + describe('settings management', () => { + it('first access creates settings from global defaults', async () => { + const settings = await service.getSettings('conn-1', 'usedMemory'); + expect(settings.metricKind).toBe('usedMemory'); + expect(settings.enabled).toBe(true); + expect(settings.ceiling).toBeNull(); + expect(settings.rollingWindowMs).toBe(21600000); + }); + + it('update merges with existing settings', async () => { + const updated = await service.updateSettings('conn-1', 'opsPerSec', { ceiling: 80_000 }); + expect(updated.ceiling).toBe(80_000); + expect(updated.rollingWindowMs).toBe(21600000); + }); + + it('update invalidates forecast cache', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startOps: 40_000, endOps: 50_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const first = await service.getForecast('conn-1', 'opsPerSec'); + expect(first.mode).toBe('trend'); + + await service.updateSettings('conn-1', 'opsPerSec', { ceiling: 80_000 }); + + const second = await service.getForecast('conn-1', 'opsPerSec'); + expect(second.mode).toBe('forecast'); + }); + }); + + // ── Ceiling exceeded ── (H2) + + describe('ceiling already exceeded', () => { + it('memory above maxmemory returns exceeded', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startMemory: 90_000_000, endMemory: 110_000_000, + maxmemory: 100_000_000, connectionId: 
'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'usedMemory'); + + expect(forecast.mode).toBe('forecast'); + expect(forecast.timeToLimitMs).toBe(0); + expect(forecast.timeToLimitHuman).toMatch(/exceeded/i); + }); + + it('CPU above ceiling returns exceeded', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startCpuSys: 40, endCpuSys: 55, startCpuUser: 40, endCpuUser: 55, + connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'cpuTotal'); + + expect(forecast.mode).toBe('forecast'); + expect(forecast.timeToLimitMs).toBe(0); + expect(forecast.timeToLimitHuman).toMatch(/exceeded/i); + }); + }); + + // ── Falling/stable for non-ops metrics ── (H3) + + describe('falling/stable trends with ceiling', () => { + it('falling memory returns not projected', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startMemory: 80_000_000, endMemory: 60_000_000, + maxmemory: 100_000_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'usedMemory'); + + expect(forecast.mode).toBe('forecast'); + expect(forecast.trendDirection).toBe('falling'); + expect(forecast.timeToLimitMs).toBeNull(); + expect(forecast.timeToLimitHuman).toContain('Not projected'); + }); + + it('stable CPU returns not projected', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startCpuSys: 25, endCpuSys: 25, startCpuUser: 25, endCpuUser: 25, + connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'cpuTotal'); + + expect(forecast.mode).toBe('forecast'); + 
expect(forecast.trendDirection).toBe('stable'); + expect(forecast.timeToLimitMs).toBeNull(); + }); + + it('falling fragmentation returns not projected', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startFragRatio: 1.4, endFragRatio: 1.1, + connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'memFragmentation'); + + expect(forecast.trendDirection).toBe('falling'); + expect(forecast.timeToLimitMs).toBeNull(); + }); + }); + + // ── Zero slope / flat values ── (H4) + + describe('zero slope (identical values)', () => { + it('flat opsPerSec returns stable trend', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startOps: 5_000, endOps: 5_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'opsPerSec'); + + expect(forecast.trendDirection).toBe('stable'); + expect(forecast.growthRate).toBeCloseTo(0, 1); + expect(forecast.growthPercent).toBeCloseTo(0, 1); + }); + + it('flat memory with ceiling returns not projected', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startMemory: 50_000_000, endMemory: 50_000_000, + maxmemory: 100_000_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'usedMemory'); + + expect(forecast.trendDirection).toBe('stable'); + expect(forecast.timeToLimitMs).toBeNull(); + }); + }); + + // ── Connection isolation ── (M1) + + describe('connection isolation', () => { + it('different connections return independent forecasts', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: 
now - 60 * 60_000, intervalMs: 60_000, + startOps: 10_000, endOps: 20_000, connectionId: 'conn-a', + }), + 'conn-a', + ); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startOps: 50_000, endOps: 40_000, connectionId: 'conn-b', + }), + 'conn-b', + ); + + const forecastA = await service.getForecast('conn-a', 'opsPerSec'); + const forecastB = await service.getForecast('conn-b', 'opsPerSec'); + + expect(forecastA.trendDirection).toBe('rising'); + expect(forecastB.trendDirection).toBe('falling'); + }); + }); + + // ── Boundary conditions ── (M2) + + describe('data sufficiency boundaries', () => { + it('exactly 3 snapshots spanning 30 min is sufficient', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 3, startTime: now - 30 * 60_000, intervalMs: 15 * 60_000, + startOps: 10_000, endOps: 20_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'opsPerSec'); + expect(forecast.insufficientData).toBe(false); + }); + + it('3 snapshots spanning 29 min is insufficient', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 3, startTime: now - 29 * 60_000, intervalMs: 14.5 * 60_000, + startOps: 10_000, endOps: 20_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'opsPerSec'); + expect(forecast.insufficientData).toBe(true); + }); + + it('2 snapshots spanning 60 min is insufficient (below MIN_DATA_POINTS)', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 2, startTime: now - 60 * 60_000, intervalMs: 60 * 60_000, + startOps: 10_000, endOps: 20_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const forecast = await service.getForecast('conn-1', 'opsPerSec'); + expect(forecast.insufficientData).toBe(true); + }); + }); + 
+ // ── Alert dispatch ── (H1) + + describe('checkAlerts', () => { + let webhookService: { dispatchMetricForecastLimit: jest.Mock }; + let connectionRegistry: { list: jest.Mock; getConfig: jest.Mock }; + + beforeEach(async () => { + storage = new MemoryAdapter(); + await storage.initialize(); + + webhookService = { + dispatchMetricForecastLimit: jest.fn().mockResolvedValue(undefined), + }; + connectionRegistry = { + list: jest.fn().mockReturnValue([]), + getConfig: jest.fn().mockReturnValue({ host: 'localhost', port: 6380 }), + }; + + const module: TestingModule = await Test.createTestingModule({ + providers: [ + MetricForecastingService, + { provide: 'STORAGE_CLIENT', useValue: storage }, + { provide: SettingsService, useValue: { getCachedSettings: jest.fn().mockReturnValue(mockGlobalSettings()) } }, + { provide: ConnectionRegistry, useValue: connectionRegistry }, + { provide: 'WEBHOOK_EVENTS_PRO_SERVICE', useValue: webhookService }, + ], + }).compile(); + + service = module.get(MetricForecastingService); + }); + + it('dispatches alert when time-to-limit is within threshold', async () => { + const now = Date.now(); + // Rising ops, ceiling 80k, should project ~3h to limit + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startOps: 40_000, endOps: 50_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + // Set ceiling + alert threshold of 4h so the ~3h projection triggers + await storage.saveMetricForecastSettings( + makeSettings({ + ceiling: 80_000, + alertThresholdMs: 4 * 3_600_000, + }), + ); + + // Trigger the private checkAlerts by accessing it via prototype + await (service as any).checkAlerts(); + + expect(webhookService.dispatchMetricForecastLimit).toHaveBeenCalledTimes(1); + expect(webhookService.dispatchMetricForecastLimit).toHaveBeenCalledWith( + expect.objectContaining({ + event: 'metric_forecast.limit', + metricKind: 'opsPerSec', + connectionId: 'conn-1', + }), + ); + }); + + 
it('dispatches with safe values for hysteresis recovery when time-to-limit exceeds threshold', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startOps: 40_000, endOps: 50_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + // Ceiling very far away, alert threshold small + await storage.saveMetricForecastSettings( + makeSettings({ + ceiling: 500_000, + alertThresholdMs: 1_800_000, // 30 min — projection is much longer + }), + ); + + await (service as any).checkAlerts(); + + expect(webhookService.dispatchMetricForecastLimit).toHaveBeenCalledTimes(1); + const call = webhookService.dispatchMetricForecastLimit.mock.calls[0][0]; + expect(call.timeToLimitMs).toBeGreaterThan(1_800_000); + }); + + it('does not dispatch for disabled settings', async () => { + await storage.saveMetricForecastSettings( + makeSettings({ + ceiling: 80_000, + enabled: false, + alertThresholdMs: 999_999_999, + }), + ); + + await (service as any).checkAlerts(); + + expect(webhookService.dispatchMetricForecastLimit).not.toHaveBeenCalled(); + }); + }); + + // ── Cache ── + + describe('forecast cache', () => { + beforeEach(() => jest.useFakeTimers()); + afterEach(() => jest.useRealTimers()); + + it('second call within TTL uses cache', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startOps: 10_000, endOps: 20_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const spy = jest.spyOn(storage, 'getMemorySnapshots'); + await service.getForecast('conn-1', 'opsPerSec'); + await service.getForecast('conn-1', 'opsPerSec'); + expect(spy).toHaveBeenCalledTimes(1); + }); + + it('different metric kinds have separate caches', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 
60_000, + startOps: 10_000, endOps: 20_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const spy = jest.spyOn(storage, 'getMemorySnapshots'); + await service.getForecast('conn-1', 'opsPerSec'); + await service.getForecast('conn-1', 'usedMemory'); + expect(spy).toHaveBeenCalledTimes(2); + }); + + it('cache expires after TTL', async () => { + const now = Date.now(); + await storage.saveMemorySnapshots( + generateSnapshots({ + count: 60, startTime: now - 60 * 60_000, intervalMs: 60_000, + startOps: 10_000, endOps: 20_000, connectionId: 'conn-1', + }), + 'conn-1', + ); + + const spy = jest.spyOn(storage, 'getMemorySnapshots'); + await service.getForecast('conn-1', 'opsPerSec'); + jest.advanceTimersByTime(61_000); + await service.getForecast('conn-1', 'opsPerSec'); + expect(spy).toHaveBeenCalledTimes(2); + }); + }); +}); diff --git a/apps/api/src/metric-forecasting/__tests__/metric-kind-validation.pipe.spec.ts b/apps/api/src/metric-forecasting/__tests__/metric-kind-validation.pipe.spec.ts new file mode 100644 index 00000000..f5838bcf --- /dev/null +++ b/apps/api/src/metric-forecasting/__tests__/metric-kind-validation.pipe.spec.ts @@ -0,0 +1,29 @@ +import { BadRequestException } from '@nestjs/common'; +import { MetricKindValidationPipe } from '../pipes/metric-kind-validation.pipe'; + +describe('MetricKindValidationPipe', () => { + const pipe = new MetricKindValidationPipe(); + + it.each(['opsPerSec', 'usedMemory', 'cpuTotal', 'memFragmentation'] as const)( + 'accepts valid metric kind: %s', + (kind) => { + expect(pipe.transform(kind)).toBe(kind); + }, + ); + + it('throws BadRequestException for invalid metric kind', () => { + expect(() => pipe.transform('invalid')).toThrow(BadRequestException); + }); + + it('includes valid kinds in error message', () => { + expect(() => pipe.transform('bogus')).toThrow( + expect.objectContaining({ + message: expect.stringContaining('opsPerSec'), + }), + ); + }); + + it('rejects empty string', () => { + expect(() => 
pipe.transform('')).toThrow(BadRequestException); + }); +}); diff --git a/apps/api/src/metric-forecasting/ceiling-resolvers.ts b/apps/api/src/metric-forecasting/ceiling-resolvers.ts new file mode 100644 index 00000000..332db60a --- /dev/null +++ b/apps/api/src/metric-forecasting/ceiling-resolvers.ts @@ -0,0 +1,23 @@ +import type { MetricKind, MetricForecastSettings } from '@betterdb/shared'; +import type { StoredMemorySnapshot } from '../common/interfaces/storage-port.interface'; + +export type CeilingResolver = ( + settings: MetricForecastSettings, + latestSnapshot?: StoredMemorySnapshot, +) => number | null; + +export const CEILING_RESOLVERS: Record = { + opsPerSec: (s) => s.ceiling, + + usedMemory: (s, snapshot) => { + if (s.ceiling !== null) { + return s.ceiling; + } + if (snapshot && snapshot.maxmemory > 0) return snapshot.maxmemory; + return null; + }, + + cpuTotal: (s) => s.ceiling ?? 100, + + memFragmentation: (s) => s.ceiling ?? 1.5, +}; diff --git a/apps/api/src/metric-forecasting/metric-extractors.ts b/apps/api/src/metric-forecasting/metric-extractors.ts new file mode 100644 index 00000000..3f4d2f61 --- /dev/null +++ b/apps/api/src/metric-forecasting/metric-extractors.ts @@ -0,0 +1,11 @@ +import type { MetricKind } from '@betterdb/shared'; +import type { StoredMemorySnapshot } from '../common/interfaces/storage-port.interface'; + +export type MetricExtractor = (snapshot: StoredMemorySnapshot) => number; + +export const METRIC_EXTRACTORS: Record = { + opsPerSec: (s) => s.opsPerSec, + usedMemory: (s) => s.usedMemory, + cpuTotal: (s) => s.cpuSys + s.cpuUser, + memFragmentation: (s) => s.memFragmentationRatio, +}; diff --git a/apps/api/src/metric-forecasting/metric-forecasting.controller.ts b/apps/api/src/metric-forecasting/metric-forecasting.controller.ts new file mode 100644 index 00000000..382881eb --- /dev/null +++ b/apps/api/src/metric-forecasting/metric-forecasting.controller.ts @@ -0,0 +1,40 @@ +import { Controller, Get, Put, Body, Param } from 
'@nestjs/common'; +import { MetricForecastingService } from './metric-forecasting.service'; +import { MetricKindValidationPipe } from './pipes/metric-kind-validation.pipe'; +import { ConnectionId } from '../common/decorators/connection-id.decorator'; +import type { + MetricForecast, + MetricForecastSettings, + MetricForecastSettingsUpdate, + MetricKind, +} from '@betterdb/shared'; + +@Controller('metric-forecasting') +export class MetricForecastingController { + constructor(private readonly service: MetricForecastingService) {} + + @Get(':metricKind/forecast') + async getForecast( + @Param('metricKind', MetricKindValidationPipe) metricKind: MetricKind, + @ConnectionId() connectionId?: string, + ): Promise { + return this.service.getForecast(connectionId || 'env-default', metricKind); + } + + @Get(':metricKind/settings') + async getSettings( + @Param('metricKind', MetricKindValidationPipe) metricKind: MetricKind, + @ConnectionId() connectionId?: string, + ): Promise { + return this.service.getSettings(connectionId || 'env-default', metricKind); + } + + @Put(':metricKind/settings') + async updateSettings( + @Param('metricKind', MetricKindValidationPipe) metricKind: MetricKind, + @ConnectionId() connectionId?: string, + @Body() updates?: MetricForecastSettingsUpdate, + ): Promise { + return this.service.updateSettings(connectionId || 'env-default', metricKind, updates || {}); + } +} diff --git a/apps/api/src/metric-forecasting/metric-forecasting.module.ts b/apps/api/src/metric-forecasting/metric-forecasting.module.ts new file mode 100644 index 00000000..2b4be783 --- /dev/null +++ b/apps/api/src/metric-forecasting/metric-forecasting.module.ts @@ -0,0 +1,13 @@ +import { Module } from '@nestjs/common'; +import { StorageModule } from '../storage/storage.module'; +import { ConnectionsModule } from '../connections/connections.module'; +import { MetricForecastingService } from './metric-forecasting.service'; +import { MetricForecastingController } from 
'./metric-forecasting.controller'; + +@Module({ + imports: [StorageModule, ConnectionsModule], + providers: [MetricForecastingService], + controllers: [MetricForecastingController], + exports: [MetricForecastingService], +}) +export class MetricForecastingModule {} diff --git a/apps/api/src/throughput-forecasting/throughput-forecasting.service.ts b/apps/api/src/metric-forecasting/metric-forecasting.service.ts similarity index 54% rename from apps/api/src/throughput-forecasting/throughput-forecasting.service.ts rename to apps/api/src/metric-forecasting/metric-forecasting.service.ts index 1e4b79e0..46aa06ee 100644 --- a/apps/api/src/throughput-forecasting/throughput-forecasting.service.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.service.ts @@ -6,15 +6,18 @@ import { OnModuleInit, Optional, } from '@nestjs/common'; -import type { StoragePort } from '../common/interfaces/storage-port.interface'; +import { StoragePort, WebhookEventType } from '../common/interfaces/storage-port.interface'; import { SettingsService } from '../settings/settings.service'; import { ConnectionRegistry } from '../connections/connection-registry.service'; import type { - ThroughputForecast, - ThroughputSettings, - ThroughputSettingsUpdate, + MetricForecast, + MetricForecastSettings, + MetricForecastSettingsUpdate, + MetricKind, } from '@betterdb/shared'; import { WEBHOOK_EVENTS_PRO_SERVICE, type IWebhookEventsProService } from '@betterdb/shared'; +import { METRIC_EXTRACTORS } from './metric-extractors'; +import { CEILING_RESOLVERS } from './ceiling-resolvers'; const MIN_DATA_POINTS = 3; const MIN_TIME_SPAN_MS = 30 * 60_000; // 30 minutes @@ -23,9 +26,9 @@ const CACHE_TTL_MS = 60_000; const ALERT_CHECK_INTERVAL_MS = 60_000; @Injectable() -export class ThroughputForecastingService implements OnModuleInit, OnModuleDestroy { - private readonly logger = new Logger(ThroughputForecastingService.name); - private forecastCache = new Map(); +export class MetricForecastingService 
implements OnModuleInit, OnModuleDestroy { + private readonly logger = new Logger(MetricForecastingService.name); + private forecastCache = new Map(); private alertInterval: ReturnType | null = null; constructor( @@ -39,36 +42,37 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr onModuleInit(): void { if (this.webhookEventsProService) { - this.logger.log('Enabling throughput forecasting webhook alerts'); + this.logger.log('Enabling metric forecasting webhook alerts'); this.alertInterval = setInterval(() => this.checkAlerts(), ALERT_CHECK_INTERVAL_MS); } } onModuleDestroy(): void { if (this.alertInterval) { - this.logger.log('Disabling throughput forecasting webhook alerts'); clearInterval(this.alertInterval); this.alertInterval = null; } } - async getForecast(connectionId: string): Promise { + async getForecast(connectionId: string, metricKind: MetricKind): Promise { + const cacheKey = `${connectionId}:${metricKind}`; + // Check cache - const cached = this.forecastCache.get(connectionId); + const cached = this.forecastCache.get(cacheKey); if (cached && Date.now() - cached.computedAt < CACHE_TTL_MS) { return cached.forecast; } // Check global toggle const globalSettings = this.settingsService.getCachedSettings(); - if (!globalSettings.throughputForecastingEnabled) { - return this.buildDisabledForecast(connectionId); + if (!globalSettings.metricForecastingEnabled) { + return this.buildDisabledForecast(connectionId, metricKind); } // Check per-connection settings - const settings = await this.getOrCreateSettings(connectionId); + const settings = await this.getOrCreateSettings(connectionId, metricKind); if (!settings.enabled) { - return this.buildDisabledForecast(connectionId); + return this.buildDisabledForecast(connectionId, metricKind); } // Query snapshots @@ -82,18 +86,21 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr // Reverse to ascending (query returns DESC) const sorted = 
[...snapshots].reverse(); + // Extract metric values + const extractor = METRIC_EXTRACTORS[metricKind]; + const latestValue = sorted.length > 0 ? extractor(sorted[sorted.length - 1]) : 0; + // Check sufficient data - const latestOps = sorted.length > 0 ? sorted[sorted.length - 1].opsPerSec : 0; if (sorted.length < MIN_DATA_POINTS) { - return this.buildInsufficientForecast(connectionId, settings, latestOps); + return this.buildInsufficientForecast(connectionId, metricKind, settings, latestValue); } const timeSpan = sorted[sorted.length - 1].timestamp - sorted[0].timestamp; if (timeSpan < MIN_TIME_SPAN_MS) { - return this.buildInsufficientForecast(connectionId, settings, latestOps); + return this.buildInsufficientForecast(connectionId, metricKind, settings, latestValue); } - // Linear regression - const points = sorted.map((s) => ({ x: s.timestamp, y: s.opsPerSec })); + // Linear regression on extracted metric + const points = sorted.map((s) => ({ x: s.timestamp, y: extractor(s) })); const { slope, intercept } = this.linearRegression(points); // Compute metrics @@ -101,8 +108,8 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr const windowEnd = sorted[sorted.length - 1].timestamp; const predictedStart = slope * windowStart + intercept; const predictedEnd = slope * windowEnd + intercept; - const currentOpsPerSec = latestOps; - const growthRate = slope * 3_600_000; // ops/sec per hour + const currentValue = latestValue; + const growthRate = slope * 3_600_000; // units per hour const growthPercent = predictedStart !== 0 ? ((predictedEnd - predictedStart) / Math.abs(predictedStart)) * 100 : 0; @@ -113,9 +120,14 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr ? 
'falling' : 'stable'; + // Resolve ceiling + const latestSnapshot = sorted[sorted.length - 1]; + const resolvedCeiling = CEILING_RESOLVERS[metricKind](settings, latestSnapshot); + const baseForecast = { connectionId, - currentOpsPerSec, + metricKind, + currentValue, growthRate, growthPercent, trendDirection, @@ -125,14 +137,14 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr insufficientData: false, }; - let forecast: ThroughputForecast; + let forecast: MetricForecast; - if (settings.opsCeiling === null) { + if (resolvedCeiling === null) { // Trend mode forecast = { ...baseForecast, mode: 'trend', - opsCeiling: null, + ceiling: null, timeToLimitMs: null, timeToLimitHuman: this.formatTrendSummary( growthPercent, @@ -141,14 +153,13 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr ), }; } else { - // Forecast mode - const currentPredicted = slope * now + intercept; - - if (currentPredicted >= settings.opsCeiling) { + // Forecast mode — use actual current value, not regression estimate, + // so fast spikes are detected immediately instead of lagging behind the trend line. 
+ if (currentValue >= resolvedCeiling) { forecast = { ...baseForecast, mode: 'forecast', - opsCeiling: settings.opsCeiling, + ceiling: resolvedCeiling, timeToLimitMs: 0, timeToLimitHuman: 'Ceiling already exceeded', }; @@ -156,16 +167,16 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr forecast = { ...baseForecast, mode: 'forecast', - opsCeiling: settings.opsCeiling, + ceiling: resolvedCeiling, timeToLimitMs: null, timeToLimitHuman: 'Not projected to reach ceiling', }; } else { - const timeToLimitMs = (settings.opsCeiling - currentPredicted) / slope; + const timeToLimitMs = (resolvedCeiling - currentValue) / slope; forecast = { ...baseForecast, mode: 'forecast', - opsCeiling: settings.opsCeiling, + ceiling: resolvedCeiling, timeToLimitMs, timeToLimitHuman: this.formatTimeToLimit(timeToLimitMs), }; @@ -173,55 +184,62 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr } // Cache - this.forecastCache.set(connectionId, { forecast, computedAt: Date.now() }); + this.forecastCache.set(cacheKey, { forecast, computedAt: Date.now() }); return forecast; } - async getSettings(connectionId: string): Promise { - return this.getOrCreateSettings(connectionId); + async getSettings(connectionId: string, metricKind: MetricKind): Promise { + return this.getOrCreateSettings(connectionId, metricKind); } async updateSettings( connectionId: string, - updates: ThroughputSettingsUpdate, - ): Promise { - const current = await this.getOrCreateSettings(connectionId); - const merged: ThroughputSettings = { + metricKind: MetricKind, + updates: MetricForecastSettingsUpdate, + ): Promise { + const current = await this.getOrCreateSettings(connectionId, metricKind); + const merged: MetricForecastSettings = { ...current, ...updates, connectionId, + metricKind, updatedAt: Date.now(), }; - const saved = await this.storage.saveThroughputSettings(merged); - this.forecastCache.delete(connectionId); + const saved = await 
this.storage.saveMetricForecastSettings(merged); + this.forecastCache.delete(`${connectionId}:${metricKind}`); return saved; } - private async getOrCreateSettings(connectionId: string): Promise { - const existing = await this.storage.getThroughputSettings(connectionId); + private async getOrCreateSettings( + connectionId: string, + metricKind: MetricKind, + ): Promise { + const existing = await this.storage.getMetricForecastSettings(connectionId, metricKind); if (existing) return existing; const globalSettings = this.settingsService.getCachedSettings(); - if (!globalSettings.throughputForecastingEnabled) { + if (!globalSettings.metricForecastingEnabled) { return { connectionId, + metricKind, enabled: false, - opsCeiling: null, - rollingWindowMs: globalSettings.throughputForecastingDefaultRollingWindowMs, - alertThresholdMs: globalSettings.throughputForecastingDefaultAlertThresholdMs, + ceiling: null, + rollingWindowMs: globalSettings.metricForecastingDefaultRollingWindowMs, + alertThresholdMs: globalSettings.metricForecastingDefaultAlertThresholdMs, updatedAt: Date.now(), }; } - const newSettings: ThroughputSettings = { + const newSettings: MetricForecastSettings = { connectionId, + metricKind, enabled: true, - opsCeiling: null, - rollingWindowMs: globalSettings.throughputForecastingDefaultRollingWindowMs, - alertThresholdMs: globalSettings.throughputForecastingDefaultAlertThresholdMs, + ceiling: null, + rollingWindowMs: globalSettings.metricForecastingDefaultRollingWindowMs, + alertThresholdMs: globalSettings.metricForecastingDefaultAlertThresholdMs, updatedAt: Date.now(), }; - return this.storage.saveThroughputSettings(newSettings); + return this.storage.saveMetricForecastSettings(newSettings); } private linearRegression(points: { x: number; y: number }[]): { @@ -261,17 +279,18 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr return `${sign}${growthPercent.toFixed(1)}% over ${windowHours}h, ${direction}`; } - private 
buildDisabledForecast(connectionId: string): ThroughputForecast { + private buildDisabledForecast(connectionId: string, metricKind: MetricKind): MetricForecast { return { connectionId, + metricKind, mode: 'trend', - currentOpsPerSec: 0, + currentValue: 0, growthRate: 0, growthPercent: 0, trendDirection: 'stable', dataPointCount: 0, windowMs: 0, - opsCeiling: null, + ceiling: null, timeToLimitMs: null, timeToLimitHuman: '', enabled: false, @@ -281,19 +300,21 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr private buildInsufficientForecast( connectionId: string, - settings: ThroughputSettings, - currentOpsPerSec: number, - ): ThroughputForecast { + metricKind: MetricKind, + settings: MetricForecastSettings, + currentValue: number, + ): MetricForecast { return { connectionId, + metricKind, mode: 'trend', - currentOpsPerSec, + currentValue, growthRate: 0, growthPercent: 0, trendDirection: 'stable', dataPointCount: 0, windowMs: settings.rollingWindowMs, - opsCeiling: settings.opsCeiling, + ceiling: settings.ceiling, timeToLimitMs: null, timeToLimitHuman: '', enabled: true, @@ -304,37 +325,45 @@ export class ThroughputForecastingService implements OnModuleInit, OnModuleDestr } private async checkAlerts(): Promise { - if (!this.webhookEventsProService) return; - - const globalSettings = this.settingsService.getCachedSettings(); - if (!globalSettings.throughputForecastingEnabled) return; + if (!this.webhookEventsProService) { + this.logger.warn('WebhookEventsProService not initialized'); + return; + } try { - const activeSettings = await this.storage.getActiveThroughputSettings(); + const activeSettings = await this.storage.getActiveMetricForecastSettings(); for (const settings of activeSettings) { - const forecast = await this.getForecast(settings.connectionId); - if ( - forecast.timeToLimitMs !== null && - forecast.timeToLimitMs > 0 && - forecast.opsCeiling !== null - ) { + try { + const forecast = await 
this.getForecast(settings.connectionId, settings.metricKind); + this.logger.log( + `[checkAlerts] ${WebhookEventType.METRIC_FORECAST_LIMIT} ${settings.connectionId}:${settings.metricKind} — ` + + `current=${forecast.currentValue}, ceiling=${forecast.ceiling}, ` + + `timeToLimit=${forecast.timeToLimitMs}, threshold=${settings.alertThresholdMs}, ` + + `trend=${forecast.trendDirection}`, + ); const config = this.connectionRegistry.getConfig(settings.connectionId); - if (config) { - await this.webhookEventsProService.dispatchThroughputLimit({ - currentOpsPerSec: forecast.currentOpsPerSec, - opsCeiling: forecast.opsCeiling, - timeToLimitMs: forecast.timeToLimitMs, - threshold: settings.alertThresholdMs, - growthRate: forecast.growthRate, - timestamp: Date.now(), - instance: { host: config.host, port: config.port }, - connectionId: settings.connectionId, - }); - } + await this.webhookEventsProService.dispatchMetricForecastLimit({ + event: WebhookEventType.METRIC_FORECAST_LIMIT, + metricKind: settings.metricKind, + currentValue: forecast.currentValue, + ceiling: forecast.ceiling, + timeToLimitMs: forecast.timeToLimitMs ?? Infinity, + threshold: settings.alertThresholdMs, + growthRate: forecast.growthRate, + timestamp: Date.now(), + instance: config ? { host: config.host, port: config.port } : undefined, + connectionId: settings.connectionId, + }); + } catch (error) { + this.logger.error( + `Alert check failed for ${settings.connectionId}:${settings.metricKind}: ${error instanceof Error ? error.message : 'Unknown error'}`, + ); } } - } catch (err) { - this.logger.warn(`Alert check failed: ${err instanceof Error ? err.message : err}`); + } catch (error) { + this.logger.error( + `Alert check iteration failed: ${error instanceof Error ? 
error.message : 'Unknown error'}`, + ); } } } diff --git a/apps/api/src/metric-forecasting/pipes/metric-kind-validation.pipe.ts b/apps/api/src/metric-forecasting/pipes/metric-kind-validation.pipe.ts new file mode 100644 index 00000000..8c4b6f60 --- /dev/null +++ b/apps/api/src/metric-forecasting/pipes/metric-kind-validation.pipe.ts @@ -0,0 +1,16 @@ +import { PipeTransform, Injectable, BadRequestException } from '@nestjs/common'; +import { ALL_METRIC_KINDS, type MetricKind } from '@betterdb/shared'; + +const VALID_METRIC_KINDS = new Set(ALL_METRIC_KINDS); + +@Injectable() +export class MetricKindValidationPipe implements PipeTransform { + transform(value: string): MetricKind { + if (!VALID_METRIC_KINDS.has(value)) { + throw new BadRequestException( + `Invalid metric kind '${value}'. Valid kinds: ${ALL_METRIC_KINDS.join(', ')}`, + ); + } + return value as MetricKind; + } +} diff --git a/apps/api/src/prometheus/prometheus.module.ts b/apps/api/src/prometheus/prometheus.module.ts index 1dfc8675..519ee2d3 100644 --- a/apps/api/src/prometheus/prometheus.module.ts +++ b/apps/api/src/prometheus/prometheus.module.ts @@ -6,7 +6,7 @@ import { WebhooksModule } from '../webhooks/webhooks.module'; import { SlowLogAnalyticsModule } from '../slowlog-analytics/slowlog-analytics.module'; import { CommandLogAnalyticsModule } from '../commandlog-analytics/commandlog-analytics.module'; import { HealthModule } from '../health/health.module'; -import { ThroughputForecastingModule } from '../throughput-forecasting/throughput-forecasting.module'; +import { MetricForecastingModule } from '../metric-forecasting/metric-forecasting.module'; @Module({ imports: [ @@ -15,7 +15,7 @@ import { ThroughputForecastingModule } from '../throughput-forecasting/throughpu SlowLogAnalyticsModule, CommandLogAnalyticsModule, forwardRef(() => HealthModule), - ThroughputForecastingModule, + MetricForecastingModule, ], controllers: [PrometheusController], providers: [PrometheusService], diff --git 
a/apps/api/src/prometheus/prometheus.service.ts b/apps/api/src/prometheus/prometheus.service.ts index 18bf2786..4a88dbf1 100644 --- a/apps/api/src/prometheus/prometheus.service.ts +++ b/apps/api/src/prometheus/prometheus.service.ts @@ -21,7 +21,8 @@ import { MultiConnectionPoller, ConnectionContext, } from '../common/services/multi-connection-poller'; -import { ThroughputForecastingService } from '../throughput-forecasting/throughput-forecasting.service'; +import { MetricForecastingService } from '../metric-forecasting/metric-forecasting.service'; +import { ALL_METRIC_KINDS } from '@betterdb/shared'; // Per-connection state for tracking previous values and stale labels interface ConnectionMetricState { @@ -155,8 +156,8 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule private anomalyDetectionBufferMean: Gauge; private anomalyDetectionBufferStdDev: Gauge; - // Throughput Forecasting Metrics - private throughputTimeToLimitSeconds: Gauge; + // Metric Forecasting + private metricForecastTimeToLimitSeconds: Gauge; constructor( @Inject('STORAGE_CLIENT') private storage: StoragePort, @@ -174,7 +175,7 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule @Inject(WEBHOOK_EVENTS_ENTERPRISE_SERVICE) private readonly webhookEventsEnterpriseService?: IWebhookEventsEnterpriseService, @Optional() - private readonly throughputForecastingService?: ThroughputForecastingService, + private readonly metricForecastingService?: MetricForecastingService, ) { super(connectionRegistry); this.pollIntervalMs = this.configService.get('PROMETHEUS_POLL_INTERVAL_MS', 5000); @@ -544,10 +545,11 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule ['metric_type'], ); - // Throughput Forecasting - this.throughputTimeToLimitSeconds = this.createGauge( - 'throughput_time_to_limit_seconds', - 'Projected seconds until ops/sec reaches configured ceiling. 
Only exported when a ceiling is configured.', + // Metric Forecasting + this.metricForecastTimeToLimitSeconds = this.createGauge( + 'metric_forecast_time_to_limit_seconds', + 'Projected seconds until metric reaches configured ceiling.', + ['metric_kind'], ); } @@ -582,22 +584,30 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule await this.updateClientMetrics(connectionId, connLabel, state); await this.updateSlowlogMetrics(connectionId, connLabel, state); await this.updateCommandlogMetrics(connectionId, connLabel, state); - await this.updateThroughputMetrics(connectionId, connLabel); + await this.updateMetricForecastMetrics(connectionId, connLabel); } - private async updateThroughputMetrics(connectionId: string, connLabel: string): Promise { - if (!this.throughputForecastingService) { - return; - } - try { - const forecast = await this.throughputForecastingService.getForecast(connectionId); - if (forecast.opsCeiling !== null && !forecast.insufficientData && forecast.enabled) { - const value = forecast.timeToLimitMs !== null ? forecast.timeToLimitMs / 1000 : -1; - this.throughputTimeToLimitSeconds.labels(connLabel).set(value); + private async updateMetricForecastMetrics( + connectionId: string, + connLabel: string, + ): Promise { + if (!this.metricForecastingService) return; + + // Only export metrics for metric kinds that already have settings configured. + // Avoids auto-provisioning settings rows as a side effect of Prometheus scraping. + for (const metricKind of ALL_METRIC_KINDS) { + try { + const settings = await this.storage.getMetricForecastSettings(connectionId, metricKind); + if (!settings || !settings.enabled) continue; + + const forecast = await this.metricForecastingService.getForecast(connectionId, metricKind); + if (forecast.ceiling !== null && !forecast.insufficientData && forecast.enabled) { + const value = forecast.timeToLimitMs !== null ? 
forecast.timeToLimitMs / 1000 : -1; + this.metricForecastTimeToLimitSeconds.labels(connLabel, metricKind).set(value); + } + } catch { + // Silently skip if forecasting unavailable for this metric kind } - } catch { - this.logger.warn(`Failed to update throughput metrics for connection ${connectionId}`); - // Silently skip if forecasting unavailable } } diff --git a/apps/api/src/settings/settings.service.ts b/apps/api/src/settings/settings.service.ts index dcdb2b97..cfbbc77d 100644 --- a/apps/api/src/settings/settings.service.ts +++ b/apps/api/src/settings/settings.service.ts @@ -63,14 +63,14 @@ export class SettingsService implements OnModuleInit, OnModuleDestroy { this.configService.get('ANOMALY_PROMETHEUS_INTERVAL_MS', '30000'), 10, ), - throughputForecastingEnabled: - this.configService.get('THROUGHPUT_FORECASTING_ENABLED', 'true') === 'true', - throughputForecastingDefaultRollingWindowMs: parseInt( - this.configService.get('THROUGHPUT_FORECASTING_DEFAULT_ROLLING_WINDOW_MS', '21600000'), + metricForecastingEnabled: + this.configService.get('METRIC_FORECASTING_ENABLED', 'true') === 'true', + metricForecastingDefaultRollingWindowMs: parseInt( + this.configService.get('METRIC_FORECASTING_DEFAULT_ROLLING_WINDOW_MS', '21600000'), 10, ), - throughputForecastingDefaultAlertThresholdMs: parseInt( - this.configService.get('THROUGHPUT_FORECASTING_DEFAULT_ALERT_THRESHOLD_MS', '7200000'), + metricForecastingDefaultAlertThresholdMs: parseInt( + this.configService.get('METRIC_FORECASTING_DEFAULT_ALERT_THRESHOLD_MS', '7200000'), 10, ), createdAt: now, diff --git a/apps/api/src/storage/adapters/base-sql.adapter.ts b/apps/api/src/storage/adapters/base-sql.adapter.ts index 6124229d..36cc7abb 100644 --- a/apps/api/src/storage/adapters/base-sql.adapter.ts +++ b/apps/api/src/storage/adapters/base-sql.adapter.ts @@ -266,9 +266,9 @@ export class RowMappers { anomalyPollIntervalMs: row.anomaly_poll_interval_ms, anomalyCacheTtlMs: row.anomaly_cache_ttl_ms, anomalyPrometheusIntervalMs: 
row.anomaly_prometheus_interval_ms, - throughputForecastingEnabled: !!row.throughput_forecasting_enabled, - throughputForecastingDefaultRollingWindowMs: row.throughput_forecasting_default_rolling_window_ms, - throughputForecastingDefaultAlertThresholdMs: row.throughput_forecasting_default_alert_threshold_ms, + metricForecastingEnabled: !!row.throughput_forecasting_enabled, + metricForecastingDefaultRollingWindowMs: row.throughput_forecasting_default_rolling_window_ms, + metricForecastingDefaultAlertThresholdMs: row.throughput_forecasting_default_alert_threshold_ms, updatedAt: typeof row.updated_at === 'string' ? parseInt(row.updated_at, 10) : row.updated_at, createdAt: typeof row.created_at === 'string' ? parseInt(row.created_at, 10) : row.created_at, }; diff --git a/apps/api/src/storage/adapters/memory.adapter.ts b/apps/api/src/storage/adapters/memory.adapter.ts index d6bed995..fe2ef838 100644 --- a/apps/api/src/storage/adapters/memory.adapter.ts +++ b/apps/api/src/storage/adapters/memory.adapter.ts @@ -34,7 +34,12 @@ import { HotKeyQueryOptions, DatabaseConnectionConfig, } from '../../common/interfaces/storage-port.interface'; -import type { VectorIndexSnapshot, VectorIndexSnapshotQueryOptions, ThroughputSettings } from '@betterdb/shared'; +import type { + VectorIndexSnapshot, + VectorIndexSnapshotQueryOptions, + MetricForecastSettings, + MetricKind, +} from '@betterdb/shared'; export class MemoryAdapter implements StoragePort { private aclEntries: StoredAclEntry[] = []; @@ -47,7 +52,7 @@ export class MemoryAdapter implements StoragePort { private latencyHistograms: StoredLatencyHistogram[] = []; private memorySnapshots: StoredMemorySnapshot[] = []; private vectorIndexSnapshots: VectorIndexSnapshot[] = []; - private throughputSettings: Map = new Map(); + private metricForecastSettings: Map = new Map(); private settings: AppSettings | null = null; private webhooks: Map = new Map(); private deliveries: Map = new Map(); @@ -724,16 +729,16 @@ export class 
MemoryAdapter implements StoragePort { if (updates.anomalyPrometheusIntervalMs !== undefined) { validUpdates.anomalyPrometheusIntervalMs = updates.anomalyPrometheusIntervalMs; } - if (updates.throughputForecastingEnabled !== undefined) { - validUpdates.throughputForecastingEnabled = updates.throughputForecastingEnabled; + if (updates.metricForecastingEnabled !== undefined) { + validUpdates.metricForecastingEnabled = updates.metricForecastingEnabled; } - if (updates.throughputForecastingDefaultRollingWindowMs !== undefined) { - validUpdates.throughputForecastingDefaultRollingWindowMs = - updates.throughputForecastingDefaultRollingWindowMs; + if (updates.metricForecastingDefaultRollingWindowMs !== undefined) { + validUpdates.metricForecastingDefaultRollingWindowMs = + updates.metricForecastingDefaultRollingWindowMs; } - if (updates.throughputForecastingDefaultAlertThresholdMs !== undefined) { - validUpdates.throughputForecastingDefaultAlertThresholdMs = - updates.throughputForecastingDefaultAlertThresholdMs; + if (updates.metricForecastingDefaultAlertThresholdMs !== undefined) { + validUpdates.metricForecastingDefaultAlertThresholdMs = + updates.metricForecastingDefaultAlertThresholdMs; } this.settings = { @@ -1348,25 +1353,39 @@ export class MemoryAdapter implements StoragePort { } } - // Throughput Forecasting Settings - async getThroughputSettings( + // Metric Forecasting Settings + + private metricForecastKey(connectionId: string, metricKind: MetricKind): string { + return `${connectionId}:${metricKind}`; + } + + async getMetricForecastSettings( connectionId: string, - ): Promise { - return this.throughputSettings.get(connectionId) ?? null; + metricKind: MetricKind, + ): Promise { + return this.metricForecastSettings.get(this.metricForecastKey(connectionId, metricKind)) ?? 
null; } - async saveThroughputSettings( - settings: ThroughputSettings, - ): Promise { - this.throughputSettings.set(settings.connectionId, settings); + async saveMetricForecastSettings( + settings: MetricForecastSettings, + ): Promise { + this.metricForecastSettings.set( + this.metricForecastKey(settings.connectionId, settings.metricKind), + settings, + ); return { ...settings }; } - async deleteThroughputSettings(connectionId: string): Promise { - return this.throughputSettings.delete(connectionId); + async deleteMetricForecastSettings( + connectionId: string, + metricKind: MetricKind, + ): Promise { + return this.metricForecastSettings.delete(this.metricForecastKey(connectionId, metricKind)); } - async getActiveThroughputSettings(): Promise { - return [...this.throughputSettings.values()].filter((s) => s.enabled && s.opsCeiling !== null); + async getActiveMetricForecastSettings(): Promise { + return [...this.metricForecastSettings.values()].filter( + (s) => s.enabled && s.ceiling !== null, + ); } } diff --git a/apps/api/src/storage/adapters/postgres.adapter.ts b/apps/api/src/storage/adapters/postgres.adapter.ts index 51eba4b2..0298955b 100644 --- a/apps/api/src/storage/adapters/postgres.adapter.ts +++ b/apps/api/src/storage/adapters/postgres.adapter.ts @@ -38,7 +38,8 @@ import { import type { VectorIndexSnapshot, VectorIndexSnapshotQueryOptions, - ThroughputSettings, + MetricForecastSettings, + MetricKind, } from '@betterdb/shared'; import { PostgresDialect, RowMappers } from './base-sql.adapter'; @@ -1170,13 +1171,15 @@ export class PostgresAdapter implements StoragePort { ALTER TABLE app_settings ADD COLUMN IF NOT EXISTS throughput_forecasting_default_rolling_window_ms INTEGER NOT NULL DEFAULT 21600000; ALTER TABLE app_settings ADD COLUMN IF NOT EXISTS throughput_forecasting_default_alert_threshold_ms INTEGER NOT NULL DEFAULT 7200000; - CREATE TABLE IF NOT EXISTS throughput_settings ( - connection_id TEXT PRIMARY KEY, + CREATE TABLE IF NOT EXISTS 
metric_forecast_settings ( + connection_id TEXT NOT NULL, + metric_kind TEXT NOT NULL, enabled BOOLEAN NOT NULL DEFAULT true, - ops_ceiling INTEGER, + ceiling DOUBLE PRECISION, rolling_window_ms INTEGER NOT NULL DEFAULT 21600000, alert_threshold_ms INTEGER NOT NULL DEFAULT 7200000, - updated_at BIGINT NOT NULL + updated_at BIGINT NOT NULL, + PRIMARY KEY (connection_id, metric_kind) ); CREATE TABLE IF NOT EXISTS webhooks ( @@ -2245,9 +2248,9 @@ export class PostgresAdapter implements StoragePort { settings.anomalyPollIntervalMs, settings.anomalyCacheTtlMs, settings.anomalyPrometheusIntervalMs, - settings.throughputForecastingEnabled, - settings.throughputForecastingDefaultRollingWindowMs, - settings.throughputForecastingDefaultAlertThresholdMs, + settings.metricForecastingEnabled, + settings.metricForecastingDefaultRollingWindowMs, + settings.metricForecastingDefaultAlertThresholdMs, now, settings.createdAt || now, ], @@ -3504,48 +3507,54 @@ export class PostgresAdapter implements StoragePort { ]); } - // Throughput Forecasting Settings - async getThroughputSettings(connectionId: string): Promise { - if (!this.pool) { - throw new Error('Database not initialized'); - } - const result = await this.pool.query( - 'SELECT * FROM throughput_settings WHERE connection_id = $1', - [connectionId], - ); - if (result.rows.length === 0) { - return null; - } - const row = result.rows[0]; + // Metric Forecasting Settings + + private mapMetricForecastRow(row: any): MetricForecastSettings { return { connectionId: row.connection_id, + metricKind: row.metric_kind as MetricKind, enabled: row.enabled, - opsCeiling: row.ops_ceiling ?? null, + ceiling: row.ceiling ?? 
null, rollingWindowMs: row.rolling_window_ms, alertThresholdMs: row.alert_threshold_ms, updatedAt: Number(row.updated_at), }; } - async saveThroughputSettings(settings: ThroughputSettings): Promise { - if (!this.pool) { - throw new Error('Database not initialized'); - } + async getMetricForecastSettings( + connectionId: string, + metricKind: MetricKind, + ): Promise { + if (!this.pool) throw new Error('Database not initialized'); + const result = await this.pool.query( + 'SELECT * FROM metric_forecast_settings WHERE connection_id = $1 AND metric_kind = $2', + [connectionId, metricKind], + ); + if (result.rows.length === 0) return null; + return this.mapMetricForecastRow(result.rows[0]); + } + + async saveMetricForecastSettings( + settings: MetricForecastSettings, + ): Promise { + if (!this.pool) throw new Error('Database not initialized'); await this.pool.query( ` - INSERT INTO throughput_settings (connection_id, enabled, ops_ceiling, rolling_window_ms, alert_threshold_ms, updated_at) - VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT(connection_id) DO UPDATE SET + INSERT INTO metric_forecast_settings + (connection_id, metric_kind, enabled, ceiling, rolling_window_ms, alert_threshold_ms, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT(connection_id, metric_kind) DO UPDATE SET enabled = EXCLUDED.enabled, - ops_ceiling = EXCLUDED.ops_ceiling, + ceiling = EXCLUDED.ceiling, rolling_window_ms = EXCLUDED.rolling_window_ms, alert_threshold_ms = EXCLUDED.alert_threshold_ms, updated_at = EXCLUDED.updated_at `, [ settings.connectionId, + settings.metricKind, settings.enabled, - settings.opsCeiling, + settings.ceiling, settings.rollingWindowMs, settings.alertThresholdMs, settings.updatedAt, @@ -3554,27 +3563,23 @@ export class PostgresAdapter implements StoragePort { return { ...settings }; } - async deleteThroughputSettings(connectionId: string): Promise { + async deleteMetricForecastSettings( + connectionId: string, + metricKind: MetricKind, + ): Promise { if 
(!this.pool) throw new Error('Database not initialized'); const result = await this.pool.query( - 'DELETE FROM throughput_settings WHERE connection_id = $1', - [connectionId], + 'DELETE FROM metric_forecast_settings WHERE connection_id = $1 AND metric_kind = $2', + [connectionId, metricKind], ); return (result.rowCount ?? 0) > 0; } - async getActiveThroughputSettings(): Promise { + async getActiveMetricForecastSettings(): Promise { if (!this.pool) throw new Error('Database not initialized'); const result = await this.pool.query( - 'SELECT * FROM throughput_settings WHERE enabled = true AND ops_ceiling IS NOT NULL', + 'SELECT * FROM metric_forecast_settings WHERE enabled = true AND ceiling IS NOT NULL', ); - return result.rows.map((row: any) => ({ - connectionId: row.connection_id, - enabled: row.enabled, - opsCeiling: row.ops_ceiling, - rollingWindowMs: row.rolling_window_ms, - alertThresholdMs: row.alert_threshold_ms, - updatedAt: Number(row.updated_at), - })); + return result.rows.map((row: any) => this.mapMetricForecastRow(row)); } } diff --git a/apps/api/src/storage/adapters/sqlite.adapter.ts b/apps/api/src/storage/adapters/sqlite.adapter.ts index a6b9e6fe..9a247f6f 100644 --- a/apps/api/src/storage/adapters/sqlite.adapter.ts +++ b/apps/api/src/storage/adapters/sqlite.adapter.ts @@ -34,27 +34,32 @@ import { LatencySnapshotQueryOptions, StoredMemorySnapshot, MemorySnapshotQueryOptions, - ThroughputSettings, DatabaseConnectionConfig, HotKeyEntry, HotKeyQueryOptions, StoredLatencyHistogram, } from '../../common/interfaces/storage-port.interface'; -import type { VectorIndexSnapshot, VectorIndexSnapshotQueryOptions } from '@betterdb/shared'; +import type { + VectorIndexSnapshot, + VectorIndexSnapshotQueryOptions, + MetricForecastSettings, + MetricKind, +} from '@betterdb/shared'; import { SqliteDialect, RowMappers } from './base-sql.adapter'; export interface SqliteAdapterConfig { filepath: string; } -type ThroughputSettingsRow = { +type MetricForecastSettingsRow = 
{ connection_id: string; + metric_kind: string; enabled: number; - ops_ceiling: number | null; + ceiling: number | null; rolling_window_ms: number; alert_threshold_ms: number; updated_at: number; -} | null; +}; export class SqliteAdapter implements StoragePort { private db: Database.Database | null = null; @@ -1055,13 +1060,15 @@ export class SqliteAdapter implements StoragePort { created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000) ); - CREATE TABLE IF NOT EXISTS throughput_settings ( - connection_id TEXT PRIMARY KEY, + CREATE TABLE IF NOT EXISTS metric_forecast_settings ( + connection_id TEXT NOT NULL, + metric_kind TEXT NOT NULL, enabled INTEGER NOT NULL DEFAULT 1, - ops_ceiling INTEGER, + ceiling REAL, rolling_window_ms INTEGER NOT NULL DEFAULT 21600000, alert_threshold_ms INTEGER NOT NULL DEFAULT 7200000, - updated_at INTEGER NOT NULL + updated_at INTEGER NOT NULL, + PRIMARY KEY (connection_id, metric_kind) ); CREATE TABLE IF NOT EXISTS webhooks ( @@ -1982,9 +1989,9 @@ export class SqliteAdapter implements StoragePort { settings.anomalyPollIntervalMs, settings.anomalyCacheTtlMs, settings.anomalyPrometheusIntervalMs, - settings.throughputForecastingEnabled ? 1 : 0, - settings.throughputForecastingDefaultRollingWindowMs, - settings.throughputForecastingDefaultAlertThresholdMs, + settings.metricForecastingEnabled ? 1 : 0, + settings.metricForecastingDefaultRollingWindowMs, + settings.metricForecastingDefaultAlertThresholdMs, now, settings.createdAt || now, ); @@ -3260,38 +3267,45 @@ export class SqliteAdapter implements StoragePort { this.db.prepare('UPDATE agent_tokens SET last_used_at = ? 
WHERE id = ?').run(Date.now(), id); } - // Throughput Forecasting Settings - async getThroughputSettings(connectionId: string): Promise { - if (!this.db) { - throw new Error('Database not initialized'); - } - const row = this.db - .prepare('SELECT * FROM throughput_settings WHERE connection_id = ?') - .get(connectionId) as unknown as ThroughputSettingsRow; + // Metric Forecasting Settings - if (!row) { - return null; - } + private mapMetricForecastRow(row: MetricForecastSettingsRow): MetricForecastSettings { return { connectionId: row.connection_id, + metricKind: row.metric_kind as MetricKind, enabled: !!row.enabled, - opsCeiling: row.ops_ceiling ?? null, + ceiling: row.ceiling ?? null, rollingWindowMs: row.rolling_window_ms, alertThresholdMs: row.alert_threshold_ms, updatedAt: row.updated_at, }; } - async saveThroughputSettings(settings: ThroughputSettings): Promise { + async getMetricForecastSettings( + connectionId: string, + metricKind: MetricKind, + ): Promise { + if (!this.db) throw new Error('Database not initialized'); + const row = this.db + .prepare('SELECT * FROM metric_forecast_settings WHERE connection_id = ? AND metric_kind = ?') + .get(connectionId, metricKind) as unknown as MetricForecastSettingsRow; + if (!row) return null; + return this.mapMetricForecastRow(row); + } + + async saveMetricForecastSettings( + settings: MetricForecastSettings, + ): Promise { if (!this.db) throw new Error('Database not initialized'); this.db .prepare( ` - INSERT INTO throughput_settings (connection_id, enabled, ops_ceiling, rolling_window_ms, alert_threshold_ms, updated_at) - VALUES (?, ?, ?, ?, ?, ?) - ON CONFLICT(connection_id) DO UPDATE SET + INSERT INTO metric_forecast_settings + (connection_id, metric_kind, enabled, ceiling, rolling_window_ms, alert_threshold_ms, updated_at) + VALUES (?, ?, ?, ?, ?, ?, ?) 
+ ON CONFLICT(connection_id, metric_kind) DO UPDATE SET enabled = excluded.enabled, - ops_ceiling = excluded.ops_ceiling, + ceiling = excluded.ceiling, rolling_window_ms = excluded.rolling_window_ms, alert_threshold_ms = excluded.alert_threshold_ms, updated_at = excluded.updated_at @@ -3299,8 +3313,9 @@ export class SqliteAdapter implements StoragePort { ) .run( settings.connectionId, + settings.metricKind, settings.enabled ? 1 : 0, - settings.opsCeiling, + settings.ceiling, settings.rollingWindowMs, settings.alertThresholdMs, settings.updatedAt, @@ -3308,29 +3323,24 @@ export class SqliteAdapter implements StoragePort { return { ...settings }; } - async deleteThroughputSettings(connectionId: string): Promise { + async deleteMetricForecastSettings( + connectionId: string, + metricKind: MetricKind, + ): Promise { if (!this.db) throw new Error('Database not initialized'); const result = this.db - .prepare('DELETE FROM throughput_settings WHERE connection_id = ?') - .run(connectionId); + .prepare('DELETE FROM metric_forecast_settings WHERE connection_id = ? 
AND metric_kind = ?') + .run(connectionId, metricKind); return result.changes > 0; } - async getActiveThroughputSettings(): Promise { + async getActiveMetricForecastSettings(): Promise { if (!this.db) throw new Error('Database not initialized'); const rows = this.db - .prepare('SELECT * FROM throughput_settings WHERE enabled = 1 AND ops_ceiling IS NOT NULL') - .all() as ThroughputSettingsRow[]; - if (!rows || rows.length === 0) { - return []; - } - return rows.map((row) => ({ - connectionId: row!.connection_id, - enabled: !!row!.enabled, - opsCeiling: row!.ops_ceiling, - rollingWindowMs: row!.rolling_window_ms, - alertThresholdMs: row!.alert_threshold_ms, - updatedAt: row!.updated_at, - })); + .prepare( + 'SELECT * FROM metric_forecast_settings WHERE enabled = 1 AND ceiling IS NOT NULL', + ) + .all() as MetricForecastSettingsRow[]; + return rows.map((row) => this.mapMetricForecastRow(row)); } } diff --git a/apps/api/src/throughput-forecasting/__tests__/throughput-forecasting.service.spec.ts b/apps/api/src/throughput-forecasting/__tests__/throughput-forecasting.service.spec.ts deleted file mode 100644 index 7a82ae8e..00000000 --- a/apps/api/src/throughput-forecasting/__tests__/throughput-forecasting.service.spec.ts +++ /dev/null @@ -1,554 +0,0 @@ -import { Test, TestingModule } from '@nestjs/testing'; -import { MemoryAdapter } from '../../storage/adapters/memory.adapter'; -import { ThroughputForecastingService } from '../throughput-forecasting.service'; -import { SettingsService } from '../../settings/settings.service'; -import { ConnectionRegistry } from '../../connections/connection-registry.service'; -import type { AppSettings } from '@betterdb/shared'; -import type { ThroughputSettings } from '@betterdb/shared'; -import type { StoredMemorySnapshot } from '../../common/interfaces/storage-port.interface'; - -// ── Test Helpers ── - -function mockGlobalSettings(overrides?: Partial): AppSettings { - return { - id: 1, - auditPollIntervalMs: 60000, - 
clientAnalyticsPollIntervalMs: 60000, - anomalyPollIntervalMs: 1000, - anomalyCacheTtlMs: 3600000, - anomalyPrometheusIntervalMs: 30000, - throughputForecastingEnabled: true, - throughputForecastingDefaultRollingWindowMs: 21600000, - throughputForecastingDefaultAlertThresholdMs: 7200000, - createdAt: Date.now(), - updatedAt: Date.now(), - ...overrides, - }; -} - -function makeThroughputSettings(overrides?: Partial): ThroughputSettings { - return { - connectionId: 'conn-1', - enabled: true, - opsCeiling: null, - rollingWindowMs: 21600000, - alertThresholdMs: 7200000, - updatedAt: Date.now(), - ...overrides, - }; -} - -function generateSnapshots(opts: { - count: number; - startTime: number; - intervalMs: number; - startOps: number; - endOps: number; - connectionId?: string; -}): StoredMemorySnapshot[] { - const snapshots: StoredMemorySnapshot[] = []; - for (let i = 0; i < opts.count; i++) { - const t = i / (opts.count - 1); - snapshots.push({ - id: `snap-${i}`, - timestamp: opts.startTime + i * opts.intervalMs, - usedMemory: 1000000, - usedMemoryRss: 1200000, - usedMemoryPeak: 1500000, - memFragmentationRatio: 1.2, - maxmemory: 0, - allocatorFragRatio: 1.0, - opsPerSec: Math.round(opts.startOps + t * (opts.endOps - opts.startOps)), - cpuSys: 1.0, - cpuUser: 2.0, - ioThreadedReads: 0, - ioThreadedWrites: 0, - connectionId: opts.connectionId ?? 
'conn-1', - }); - } - return snapshots; -} - -// ── Test Suite ── - -describe('ThroughputForecastingService', () => { - let service: ThroughputForecastingService; - let storage: MemoryAdapter; - let settingsService: { getCachedSettings: jest.Mock }; - - beforeEach(async () => { - storage = new MemoryAdapter(); - await storage.initialize(); - - settingsService = { - getCachedSettings: jest.fn().mockReturnValue(mockGlobalSettings()), - }; - - const module: TestingModule = await Test.createTestingModule({ - providers: [ - ThroughputForecastingService, - { provide: 'STORAGE_CLIENT', useValue: storage }, - { provide: SettingsService, useValue: settingsService }, - { - provide: ConnectionRegistry, - useValue: { list: jest.fn().mockReturnValue([]), getConfig: jest.fn() }, - }, - ], - }).compile(); - - service = module.get(ThroughputForecastingService); - }); - - // ── Slice 1: Storage Round-Trip ── - - describe('Slice 1: Storage round-trip', () => { - it('1a: saves and retrieves throughput settings', async () => { - const settings = makeThroughputSettings({ connectionId: 'conn-1', opsCeiling: 80000 }); - await storage.saveThroughputSettings(settings); - const result = await storage.getThroughputSettings('conn-1'); - expect(result).not.toBeNull(); - expect(result!.connectionId).toBe('conn-1'); - expect(result!.opsCeiling).toBe(80000); - expect(result!.enabled).toBe(true); - expect(result!.rollingWindowMs).toBe(21600000); - }); - - it('1b: returns null for missing connection', async () => { - const result = await storage.getThroughputSettings('conn-unknown'); - expect(result).toBeNull(); - }); - - it('1c: upsert overwrites existing settings', async () => { - await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 50000 })); - await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 90000 })); - const result = await storage.getThroughputSettings('conn-1'); - expect(result!.opsCeiling).toBe(90000); - }); - - it('1d: delete removes settings 
and returns true', async () => { - await storage.saveThroughputSettings(makeThroughputSettings()); - const deleted = await storage.deleteThroughputSettings('conn-1'); - expect(deleted).toBe(true); - const result = await storage.getThroughputSettings('conn-1'); - expect(result).toBeNull(); - }); - - it('1e: delete non-existent returns false', async () => { - const deleted = await storage.deleteThroughputSettings('conn-unknown'); - expect(deleted).toBe(false); - }); - - it('1f: getActiveThroughputSettings filters correctly', async () => { - await storage.saveThroughputSettings( - makeThroughputSettings({ connectionId: 'conn-a', enabled: true, opsCeiling: 80000 }), - ); - await storage.saveThroughputSettings( - makeThroughputSettings({ connectionId: 'conn-b', enabled: true, opsCeiling: null }), - ); - await storage.saveThroughputSettings( - makeThroughputSettings({ connectionId: 'conn-c', enabled: false, opsCeiling: 80000 }), - ); - const active = await storage.getActiveThroughputSettings(); - expect(active).toHaveLength(1); - expect(active[0].connectionId).toBe('conn-a'); - }); - }); - - // ── Slice 2: Rising Trend, No Ceiling ── - - describe('Slice 2: Rising trend, no ceiling', () => { - it('2a: returns rising trend with correct direction and growth', async () => { - const now = Date.now(); - const snapshots = generateSnapshots({ - count: 60, - startTime: now - 60 * 60_000, - intervalMs: 60_000, - startOps: 10_000, - endOps: 20_000, - connectionId: 'conn-1', - }); - await storage.saveMemorySnapshots(snapshots, 'conn-1'); - - const forecast = await service.getForecast('conn-1'); - - expect(forecast.mode).toBe('trend'); - expect(forecast.trendDirection).toBe('rising'); - expect(forecast.growthPercent).toBeGreaterThan(5); - expect(forecast.timeToLimitMs).toBeNull(); - expect(forecast.opsCeiling).toBeNull(); - expect(forecast.currentOpsPerSec).toBeGreaterThanOrEqual(19_000); - expect(forecast.insufficientData).toBe(false); - expect(forecast.enabled).toBe(true); - 
expect(forecast.dataPointCount).toBe(60); - }); - }); - - // ── Slice 3: Rising Trend with Ceiling ── - - describe('Slice 3: Rising trend with ceiling', () => { - it('3a: returns forecast with time-to-limit', async () => { - const now = Date.now(); - const snapshots = generateSnapshots({ - count: 60, - startTime: now - 60 * 60_000, - intervalMs: 60_000, - startOps: 40_000, - endOps: 50_000, - connectionId: 'conn-1', - }); - await storage.saveMemorySnapshots(snapshots, 'conn-1'); - await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 80_000 })); - - const forecast = await service.getForecast('conn-1'); - - expect(forecast.mode).toBe('forecast'); - expect(forecast.timeToLimitMs).toBeGreaterThan(0); - expect(forecast.timeToLimitHuman).toContain('at current growth rate'); - expect(forecast.opsCeiling).toBe(80_000); - }); - - it('3b: time-to-limit is approximately correct', async () => { - const now = Date.now(); - // Growth: 10k/hr, current ~50k, ceiling 80k => ~3h to limit - const snapshots = generateSnapshots({ - count: 60, - startTime: now - 60 * 60_000, - intervalMs: 60_000, - startOps: 40_000, - endOps: 50_000, - connectionId: 'conn-1', - }); - await storage.saveMemorySnapshots(snapshots, 'conn-1'); - await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 80_000 })); - - const forecast = await service.getForecast('conn-1'); - const threeHoursMs = 3 * 3_600_000; - - expect(forecast.timeToLimitMs).toBeGreaterThan(threeHoursMs * 0.8); - expect(forecast.timeToLimitMs).toBeLessThan(threeHoursMs * 1.2); - }); - }); - - // ── Slice 4: Falling/Stable Trend with Ceiling ── - - describe('Slice 4: Falling/stable trend with ceiling', () => { - it('4a: falling trend returns not projected', async () => { - const now = Date.now(); - const snapshots = generateSnapshots({ - count: 60, - startTime: now - 60 * 60_000, - intervalMs: 60_000, - startOps: 50_000, - endOps: 40_000, - connectionId: 'conn-1', - }); - await 
storage.saveMemorySnapshots(snapshots, 'conn-1'); - await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 80_000 })); - - const forecast = await service.getForecast('conn-1'); - - expect(forecast.mode).toBe('forecast'); - expect(forecast.trendDirection).toBe('falling'); - expect(forecast.timeToLimitMs).toBeNull(); - expect(forecast.timeToLimitHuman).toContain('Not projected'); - }); - - it('4b: stable trend returns not projected', async () => { - const now = Date.now(); - const snapshots = generateSnapshots({ - count: 60, - startTime: now - 60 * 60_000, - intervalMs: 60_000, - startOps: 50_000, - endOps: 50_000, - connectionId: 'conn-1', - }); - await storage.saveMemorySnapshots(snapshots, 'conn-1'); - await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 80_000 })); - - const forecast = await service.getForecast('conn-1'); - - expect(forecast.mode).toBe('forecast'); - expect(forecast.trendDirection).toBe('stable'); - expect(forecast.timeToLimitMs).toBeNull(); - }); - }); - - // ── Slice 5: Ceiling Already Exceeded ── - - describe('Slice 5: Ceiling already exceeded', () => { - it('5a: returns exceeded when ops above ceiling', async () => { - const now = Date.now(); - const snapshots = generateSnapshots({ - count: 60, - startTime: now - 60 * 60_000, - intervalMs: 60_000, - startOps: 85_000, - endOps: 90_000, - connectionId: 'conn-1', - }); - await storage.saveMemorySnapshots(snapshots, 'conn-1'); - await storage.saveThroughputSettings(makeThroughputSettings({ opsCeiling: 80_000 })); - - const forecast = await service.getForecast('conn-1'); - - expect(forecast.mode).toBe('forecast'); - expect(forecast.timeToLimitHuman).toMatch(/exceeded/i); - }); - }); - - // ── Slice 6: Insufficient Data ── - - describe('Slice 6: Insufficient data', () => { - it('6a: no snapshots returns insufficient data', async () => { - const forecast = await service.getForecast('conn-1'); - expect(forecast.insufficientData).toBe(true); - 
expect(forecast.insufficientDataMessage).toBeDefined(); - }); - - it('6b: only 2 snapshots returns insufficient', async () => { - const now = Date.now(); - const snapshots = generateSnapshots({ - count: 2, - startTime: now - 10 * 60_000, - intervalMs: 5 * 60_000, - startOps: 10_000, - endOps: 20_000, - connectionId: 'conn-1', - }); - await storage.saveMemorySnapshots(snapshots, 'conn-1'); - - const forecast = await service.getForecast('conn-1'); - expect(forecast.insufficientData).toBe(true); - }); - - it('6c: 5 snapshots but < 30 min span returns insufficient', async () => { - const now = Date.now(); - const snapshots = generateSnapshots({ - count: 5, - startTime: now - 20 * 60_000, - intervalMs: 5 * 60_000, - startOps: 10_000, - endOps: 20_000, - connectionId: 'conn-1', - }); - await storage.saveMemorySnapshots(snapshots, 'conn-1'); - - const forecast = await service.getForecast('conn-1'); - expect(forecast.insufficientData).toBe(true); - }); - - it('6d: exactly 30 min is sufficient', async () => { - const now = Date.now(); - const snapshots = generateSnapshots({ - count: 31, - startTime: now - 30 * 60_000, - intervalMs: 60_000, - startOps: 10_000, - endOps: 20_000, - connectionId: 'conn-1', - }); - await storage.saveMemorySnapshots(snapshots, 'conn-1'); - - const forecast = await service.getForecast('conn-1'); - expect(forecast.insufficientData).toBe(false); - }); - - it('6e: insufficient data still returns currentOpsPerSec', async () => { - const now = Date.now(); - const snapshots = generateSnapshots({ - count: 2, - startTime: now - 10 * 60_000, - intervalMs: 5 * 60_000, - startOps: 40_000, - endOps: 45_000, - connectionId: 'conn-1', - }); - await storage.saveMemorySnapshots(snapshots, 'conn-1'); - - const forecast = await service.getForecast('conn-1'); - expect(forecast.insufficientData).toBe(true); - expect(forecast.currentOpsPerSec).toBe(45_000); - }); - }); - - // ── Slice 7: Lazy Settings Creation ── - - describe('Slice 7: Lazy settings creation', () => { 
- it('7a: first access creates row from global defaults', async () => { - settingsService.getCachedSettings.mockReturnValue( - mockGlobalSettings({ throughputForecastingDefaultRollingWindowMs: 43200000 }), - ); - - const settings = await service.getSettings('conn-1'); - - expect(settings.rollingWindowMs).toBe(43200000); - expect(settings.enabled).toBe(true); - expect(settings.opsCeiling).toBeNull(); - - // Verify row was persisted - const persisted = await storage.getThroughputSettings('conn-1'); - expect(persisted).not.toBeNull(); - expect(persisted!.rollingWindowMs).toBe(43200000); - }); - - it('7b: global disabled returns disabled settings without persisting', async () => { - settingsService.getCachedSettings.mockReturnValue( - mockGlobalSettings({ throughputForecastingEnabled: false }), - ); - - const settings = await service.getSettings('conn-1'); - - expect(settings.enabled).toBe(false); - - // Verify no row was persisted - const persisted = await storage.getThroughputSettings('conn-1'); - expect(persisted).toBeNull(); - }); - }); - - // ── Slice 8: Update Settings and Cache Invalidation ── - - describe('Slice 8: Update settings', () => { - it('8a: update merges with existing settings', async () => { - const now = Date.now(); - await storage.saveMemorySnapshots( - generateSnapshots({ - count: 60, - startTime: now - 60 * 60_000, - intervalMs: 60_000, - startOps: 10_000, - endOps: 20_000, - connectionId: 'conn-1', - }), - 'conn-1', - ); - - const updated = await service.updateSettings('conn-1', { opsCeiling: 80_000 }); - - expect(updated.opsCeiling).toBe(80_000); - expect(updated.rollingWindowMs).toBe(21600000); // unchanged default - }); - - it('8b: update invalidates forecast cache', async () => { - const now = Date.now(); - await storage.saveMemorySnapshots( - generateSnapshots({ - count: 60, - startTime: now - 60 * 60_000, - intervalMs: 60_000, - startOps: 40_000, - endOps: 50_000, - connectionId: 'conn-1', - }), - 'conn-1', - ); - - // First forecast: 
trend mode (no ceiling) - const first = await service.getForecast('conn-1'); - expect(first.mode).toBe('trend'); - - // Update settings with a ceiling - await service.updateSettings('conn-1', { opsCeiling: 80_000 }); - - // Second forecast should reflect new ceiling, not cached result - const second = await service.getForecast('conn-1'); - expect(second.mode).toBe('forecast'); - }); - }); - - // ── Slice 9: Per-Connection Disabled ── - - describe('Slice 9: Per-connection disabled', () => { - it('9a: disabled connection returns enabled false', async () => { - await storage.saveThroughputSettings(makeThroughputSettings({ enabled: false })); - - const forecast = await service.getForecast('conn-1'); - - expect(forecast.enabled).toBe(false); - }); - - it('9b: re-enable returns valid forecast', async () => { - const now = Date.now(); - await storage.saveMemorySnapshots( - generateSnapshots({ - count: 60, - startTime: now - 60 * 60_000, - intervalMs: 60_000, - startOps: 10_000, - endOps: 20_000, - connectionId: 'conn-1', - }), - 'conn-1', - ); - await storage.saveThroughputSettings(makeThroughputSettings({ enabled: false })); - - // Disable returns enabled false - const disabled = await service.getForecast('conn-1'); - expect(disabled.enabled).toBe(false); - - // Re-enable - await service.updateSettings('conn-1', { enabled: true }); - const enabled = await service.getForecast('conn-1'); - expect(enabled.enabled).toBe(true); - expect(enabled.insufficientData).toBe(false); - }); - }); - - // ── Slice 10: Forecast Cache ── - - describe('Slice 10: Forecast cache', () => { - beforeEach(() => { - jest.useFakeTimers(); - }); - - afterEach(() => { - jest.useRealTimers(); - }); - - it('10a: second call within TTL uses cache', async () => { - const now = Date.now(); - await storage.saveMemorySnapshots( - generateSnapshots({ - count: 60, - startTime: now - 60 * 60_000, - intervalMs: 60_000, - startOps: 10_000, - endOps: 20_000, - connectionId: 'conn-1', - }), - 'conn-1', - ); - - 
const spy = jest.spyOn(storage, 'getMemorySnapshots'); - - await service.getForecast('conn-1'); - await service.getForecast('conn-1'); - - expect(spy.mock.calls.length).toBe(1); - }); - - it('10b: call after TTL expires recomputes', async () => { - const now = Date.now(); - await storage.saveMemorySnapshots( - generateSnapshots({ - count: 60, - startTime: now - 60 * 60_000, - intervalMs: 60_000, - startOps: 10_000, - endOps: 20_000, - connectionId: 'conn-1', - }), - 'conn-1', - ); - - const spy = jest.spyOn(storage, 'getMemorySnapshots'); - - await service.getForecast('conn-1'); - jest.advanceTimersByTime(61_000); - await service.getForecast('conn-1'); - - expect(spy.mock.calls.length).toBe(2); - }); - }); -}); diff --git a/apps/api/src/throughput-forecasting/throughput-forecasting.controller.ts b/apps/api/src/throughput-forecasting/throughput-forecasting.controller.ts deleted file mode 100644 index 9727c427..00000000 --- a/apps/api/src/throughput-forecasting/throughput-forecasting.controller.ts +++ /dev/null @@ -1,27 +0,0 @@ -import { Controller, Get, Put, Body } from '@nestjs/common'; -import { ThroughputForecastingService } from './throughput-forecasting.service'; -import { ConnectionId } from '../common/decorators/connection-id.decorator'; -import type { ThroughputForecast, ThroughputSettings, ThroughputSettingsUpdate } from '@betterdb/shared'; - -@Controller('throughput-forecasting') -export class ThroughputForecastingController { - constructor(private readonly service: ThroughputForecastingService) {} - - @Get('forecast') - async getForecast(@ConnectionId() connectionId?: string): Promise { - return this.service.getForecast(connectionId || 'env-default'); - } - - @Get('settings') - async getSettings(@ConnectionId() connectionId?: string): Promise { - return this.service.getSettings(connectionId || 'env-default'); - } - - @Put('settings') - async updateSettings( - @ConnectionId() connectionId?: string, - @Body() updates?: ThroughputSettingsUpdate, - ): Promise 
{ - return this.service.updateSettings(connectionId || 'env-default', updates || {}); - } -} diff --git a/apps/api/src/throughput-forecasting/throughput-forecasting.module.ts b/apps/api/src/throughput-forecasting/throughput-forecasting.module.ts deleted file mode 100644 index 361ac30b..00000000 --- a/apps/api/src/throughput-forecasting/throughput-forecasting.module.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { Module } from '@nestjs/common'; -import { StorageModule } from '../storage/storage.module'; -import { ConnectionsModule } from '../connections/connections.module'; -import { ThroughputForecastingService } from './throughput-forecasting.service'; -import { ThroughputForecastingController } from './throughput-forecasting.controller'; - -@Module({ - imports: [StorageModule, ConnectionsModule], - providers: [ThroughputForecastingService], - controllers: [ThroughputForecastingController], - exports: [ThroughputForecastingService], -}) -export class ThroughputForecastingModule {} diff --git a/apps/api/src/webhooks/__tests__/webhooks.service.spec.ts b/apps/api/src/webhooks/__tests__/webhooks.service.spec.ts index f3df290e..a382932c 100644 --- a/apps/api/src/webhooks/__tests__/webhooks.service.spec.ts +++ b/apps/api/src/webhooks/__tests__/webhooks.service.spec.ts @@ -251,8 +251,6 @@ describe('WebhooksService', () => { describe('Tier Validation', () => { beforeEach(() => { - // Reset to non-production environment for tier validation tests - process.env.NODE_ENV = 'test'; storageClient.createWebhook.mockResolvedValue({ id: '123', name: 'Test', diff --git a/apps/api/src/webhooks/webhook-dispatcher.service.ts b/apps/api/src/webhooks/webhook-dispatcher.service.ts index 71df4ac4..edfa0a21 100644 --- a/apps/api/src/webhooks/webhook-dispatcher.service.ts +++ b/apps/api/src/webhooks/webhook-dispatcher.service.ts @@ -1,7 +1,12 @@ import { Injectable, Inject, Logger, Optional } from '@nestjs/common'; import { ConfigService } from '@nestjs/config'; import { LRUCache } from 
'lru-cache'; -import type { Webhook, WebhookPayload, WebhookEventType, WebhookThresholds } from '@betterdb/shared'; +import type { + Webhook, + WebhookPayload, + WebhookEventType, + WebhookThresholds, +} from '@betterdb/shared'; import { DeliveryStatus, getDeliveryConfig, @@ -24,7 +29,13 @@ interface AlertState { export class WebhookDispatcherService { private readonly logger = new Logger(WebhookDispatcherService.name); private readonly DEFAULT_REQUEST_TIMEOUT_MS: number; - private readonly BLOCKED_HEADERS = ['host', 'content-length', 'transfer-encoding', 'connection', 'upgrade']; + private readonly BLOCKED_HEADERS = [ + 'host', + 'content-length', + 'transfer-encoding', + 'connection', + 'upgrade', + ]; // Alert hysteresis configuration (default, can be overridden per-webhook) // 10% hysteresis prevents alert flapping - e.g., for 90% threshold: @@ -46,7 +57,8 @@ export class WebhookDispatcherService { // 10KB limit: Balances debug utility vs. database storage costs // Large HTML error pages (500 errors) often exceed this, but we capture enough for debugging // For full responses, consider object storage integration (S3, etc.) 
- private readonly DEFAULT_MAX_STORED_RESPONSE_BODY_BYTES = DEFAULT_DELIVERY_CONFIG.maxResponseBodyBytes; + private readonly DEFAULT_MAX_STORED_RESPONSE_BODY_BYTES = + DEFAULT_DELIVERY_CONFIG.maxResponseBodyBytes; // Test webhook response preview limit (1KB) // Smaller than delivery limit since test responses are returned synchronously to API caller @@ -68,7 +80,10 @@ export class WebhookDispatcherService { private readonly configService: ConfigService, @Optional() private readonly connectionRegistry?: ConnectionRegistry, ) { - this.DEFAULT_REQUEST_TIMEOUT_MS = this.configService.get('WEBHOOK_TIMEOUT_MS', DEFAULT_DELIVERY_CONFIG.timeoutMs); + this.DEFAULT_REQUEST_TIMEOUT_MS = this.configService.get( + 'WEBHOOK_TIMEOUT_MS', + DEFAULT_DELIVERY_CONFIG.timeoutMs, + ); this.sourceHost = this.configService.get('database.host', 'localhost'); this.sourcePort = this.configService.get('database.port', 6379); } @@ -105,20 +120,25 @@ export class WebhookDispatcherService { const webhooks = await this.webhooksService.getWebhooksByEvent(eventType, connectionId); if (webhooks.length === 0) { - this.logger.debug(`No webhooks subscribed to event: ${eventType}${connectionId ? ` for connection ${connectionId}` : ''}`); + this.logger.debug( + `No webhooks subscribed to event: ${eventType}${connectionId ? ` for connection ${connectionId}` : ''}`, + ); return; } - this.logger.log(`Dispatching ${eventType} to ${webhooks.length} webhook(s)${connectionId ? ` for connection ${connectionId}` : ''}`); + this.logger.log( + `Dispatching ${eventType} to ${webhooks.length} webhook(s)${connectionId ? ` for connection ${connectionId}` : ''}`, + ); // Enrich data with connectionId if provided const enrichedData = connectionId ? 
{ ...data, connectionId } : data; // Dispatch to all webhooks in parallel await Promise.allSettled( - webhooks.map(webhook => this.dispatchToWebhook(webhook, eventType, enrichedData, connectionId)) + webhooks.map((webhook) => + this.dispatchToWebhook(webhook, eventType, enrichedData, connectionId), + ), ); - } catch (error) { this.logger.error(`Failed to dispatch event ${eventType}:`, error); } @@ -179,7 +199,7 @@ export class WebhookDispatcherService { ): Promise { if (this.shouldFireAlert(alertKey, currentValue, threshold, isAbove)) { this.logger.log( - `Threshold alert triggered: ${eventType} (${currentValue} ${isAbove ? '>=' : '<='} ${threshold})` + `Threshold alert triggered: ${eventType} (${currentValue} ${isAbove ? '>=' : '<='} ${threshold})`, ); await this.dispatchEvent(eventType, data, connectionId); } @@ -210,13 +230,15 @@ export class WebhookDispatcherService { const webhooks = await this.webhooksService.getWebhooksByEvent(eventType, connectionId); if (webhooks.length === 0) { - this.logger.debug(`No webhooks subscribed to event: ${eventType}${connectionId ? ` for connection ${connectionId}` : ''}`); + this.logger.debug( + `No webhooks subscribed to event: ${eventType}${connectionId ? ` for connection ${connectionId}` : ''}`, + ); return; } // Dispatch to each webhook with its own threshold await Promise.allSettled( - webhooks.map(async webhook => { + webhooks.map(async (webhook) => { // Get per-webhook threshold and alert config const threshold = getThreshold(webhook, thresholdKey); const alertConfig = getAlertConfig(webhook); @@ -227,9 +249,17 @@ export class WebhookDispatcherService { ? 
`${alertKeyPrefix}:${connectionId}:${webhook.id}` : `${alertKeyPrefix}:${webhook.id}`; - if (this.shouldFireAlert(alertKey, currentValue, threshold, isAbove, alertConfig.hysteresisFactor)) { + if ( + this.shouldFireAlert( + alertKey, + currentValue, + threshold, + isAbove, + alertConfig.hysteresisFactor, + ) + ) { this.logger.log( - `Threshold alert triggered for webhook ${webhook.id}: ${eventType} (${currentValue} ${isAbove ? '>=' : '<='} ${threshold})` + `Threshold alert triggered for webhook ${webhook.id}: ${eventType} (${currentValue} ${isAbove ? '>=' : '<='} ${threshold})`, ); // Add threshold info to data @@ -242,9 +272,8 @@ export class WebhookDispatcherService { await this.dispatchToWebhook(webhook, eventType, enrichedData, connectionId); } - }) + }), ); - } catch (error) { this.logger.error(`Failed to dispatch per-webhook threshold alert ${eventType}:`, error); } @@ -337,11 +366,7 @@ export class WebhookDispatcherService { /** * Send webhook HTTP request */ - async sendWebhook( - webhook: Webhook, - deliveryId: string, - payload: WebhookPayload, - ): Promise { + async sendWebhook(webhook: Webhook, deliveryId: string, payload: WebhookPayload): Promise { const startTime = Date.now(); let status: DeliveryStatus = DeliveryStatus.PENDING; let statusCode: number | undefined; @@ -356,7 +381,11 @@ export class WebhookDispatcherService { // Prepare request const payloadString = JSON.stringify(payload); const timestamp = payload.timestamp; - const signature = this.generateSignatureWithTimestamp(payloadString, webhook.secret || '', timestamp); + const signature = this.generateSignatureWithTimestamp( + payloadString, + webhook.secret || '', + timestamp, + ); // Sanitize custom headers const sanitizedCustomHeaders = this.sanitizeHeaders(webhook.headers || {}); @@ -397,16 +426,15 @@ export class WebhookDispatcherService { // 4xx errors are client errors - don't retry status = DeliveryStatus.FAILED; this.logger.warn( - `Webhook delivery failed with client error 
${statusCode}: ${webhook.id} -> ${webhook.url}` + `Webhook delivery failed with client error ${statusCode}: ${webhook.id} -> ${webhook.url}`, ); } else { // 5xx errors are server errors - retry status = DeliveryStatus.RETRYING; this.logger.warn( - `Webhook delivery failed with server error ${statusCode}: ${webhook.id} -> ${webhook.url}` + `Webhook delivery failed with server error ${statusCode}: ${webhook.id} -> ${webhook.url}`, ); } - } catch (fetchError: any) { clearTimeout(timeoutId); @@ -421,7 +449,6 @@ export class WebhookDispatcherService { this.logger.error(`Webhook delivery error: ${webhook.id} -> ${webhook.url}`, fetchError); } } - } catch (error: any) { status = DeliveryStatus.FAILED; responseBody = error.message || 'Unknown error'; @@ -475,19 +502,21 @@ export class WebhookDispatcherService { // If retrying, calculate next retry time if (status === DeliveryStatus.RETRYING && attempts < webhook.retryPolicy.maxRetries) { const delay = Math.min( - webhook.retryPolicy.initialDelayMs * Math.pow(webhook.retryPolicy.backoffMultiplier, attempts - 1), - webhook.retryPolicy.maxDelayMs + webhook.retryPolicy.initialDelayMs * + Math.pow(webhook.retryPolicy.backoffMultiplier, attempts - 1), + webhook.retryPolicy.maxDelayMs, ); updates.nextRetryAt = Date.now() + delay; } else if (status === DeliveryStatus.RETRYING) { // Max retries reached - mark as dead letter for manual investigation updates.status = DeliveryStatus.DEAD_LETTER; updates.completedAt = Date.now(); - this.logger.warn(`Delivery ${deliveryId} moved to dead letter queue after ${attempts} attempts`); + this.logger.warn( + `Delivery ${deliveryId} moved to dead letter queue after ${attempts} attempts`, + ); } await this.storageClient.updateDelivery(deliveryId, updates); - } catch (error) { this.logger.error(`Failed to update delivery ${deliveryId}:`, error); } @@ -511,7 +540,8 @@ export class WebhookDispatcherService { try { // Use first subscribed event for testing, or instance.down as fallback - const 
testEventType = webhook.events.length > 0 ? webhook.events[0] : ('instance.down' as WebhookEventType); + const testEventType = + webhook.events.length > 0 ? webhook.events[0] : ('instance.down' as WebhookEventType); // Get instance info for the webhook's connection (consistent with real dispatches) const instanceInfo = this.getInstanceInfo(webhook.connectionId); @@ -533,7 +563,11 @@ export class WebhookDispatcherService { const payloadString = JSON.stringify(testPayload); const timestamp = testPayload.timestamp; - const signature = this.generateSignatureWithTimestamp(payloadString, webhook.secret || '', timestamp); + const signature = this.generateSignatureWithTimestamp( + payloadString, + webhook.secret || '', + timestamp, + ); // Sanitize custom headers const sanitizedCustomHeaders = this.sanitizeHeaders(webhook.headers || {}); @@ -570,7 +604,6 @@ export class WebhookDispatcherService { responseBody: responseBody.substring(0, this.MAX_TEST_RESPONSE_PREVIEW_BYTES), durationMs, }; - } catch (error: any) { const durationMs = Date.now() - startTime; return { diff --git a/apps/api/test/global-setup.ts b/apps/api/test/global-setup.ts index 12d9364d..70263238 100644 --- a/apps/api/test/global-setup.ts +++ b/apps/api/test/global-setup.ts @@ -1,9 +1,19 @@ import { execSync } from 'child_process'; import * as path from 'path'; +const COMPOSE_FILE = 'docker-compose.test.yml'; +const PROJECT_NAME = 'betterdb-test'; + +const CONTAINER_NAMES = [ + 'betterdb-test-valkey', + 'betterdb-test-redis', + 'betterdb-test-postgres', +]; + /** * Global test setup - starts Docker containers before all tests. - * This ensures tests have a clean, isolated environment. + * Uses a separate compose file (docker-compose.test.yml) with dedicated + * container names and ports so tests never interfere with dev containers. 
*/ export default async function globalSetup() { const projectRoot = path.resolve(__dirname, '../../..'); @@ -25,9 +35,9 @@ export default async function globalSetup() { throw new Error('Docker daemon is not running'); } - // Stop any existing containers (cleanup from previous failed runs) + // Stop any existing test containers (cleanup from previous failed runs) try { - execSync('docker compose -f docker-compose.yml down --remove-orphans', { + execSync(`docker compose -p ${PROJECT_NAME} -f ${COMPOSE_FILE} down --remove-orphans`, { cwd: projectRoot, stdio: 'ignore', }); @@ -35,13 +45,8 @@ export default async function globalSetup() { // Ignore errors if containers don't exist } - // Force remove containers by name if they still exist - const containerNames = [ - 'betterdb-monitor-valkey', - 'betterdb-monitor-redis', - 'betterdb-monitor-postgres', - ]; - for (const containerName of containerNames) { + // Force remove test containers by name if they still exist + for (const containerName of CONTAINER_NAMES) { try { execSync(`docker stop ${containerName} 2>/dev/null || true`, { stdio: 'ignore' }); execSync(`docker rm ${containerName} 2>/dev/null || true`, { stdio: 'ignore' }); @@ -50,12 +55,15 @@ export default async function globalSetup() { } } - // Start Docker containers - console.log(' Starting valkey, redis, and postgres...'); - execSync('docker compose -f docker-compose.yml up -d valkey redis postgres', { - cwd: projectRoot, - stdio: 'inherit', - }); + // Start test Docker containers + console.log(' Starting valkey, redis, and postgres (test containers)...'); + execSync( + `docker compose -p ${PROJECT_NAME} -f ${COMPOSE_FILE} up -d valkey redis postgres`, + { + cwd: projectRoot, + stdio: 'inherit', + }, + ); // Wait for services to be healthy console.log(' Waiting for services to be healthy...'); @@ -66,23 +74,22 @@ export default async function globalSetup() { while (!allHealthy && Date.now() - startTime < maxWaitTime) { try { const valkeyHealth = execSync( - 
'docker inspect --format="{{.State.Health.Status}}" betterdb-monitor-valkey 2>/dev/null || echo "none"', - { encoding: 'utf-8' } + 'docker inspect --format="{{.State.Health.Status}}" betterdb-test-valkey 2>/dev/null || echo "none"', + { encoding: 'utf-8' }, ).trim(); - const redisRunning = execSync( - 'docker inspect --format="{{.State.Running}}" betterdb-monitor-redis 2>/dev/null || echo "false"', - { encoding: 'utf-8' } + const redisHealth = execSync( + 'docker inspect --format="{{.State.Health.Status}}" betterdb-test-redis 2>/dev/null || echo "none"', + { encoding: 'utf-8' }, ).trim(); const postgresHealth = execSync( - 'docker inspect --format="{{.State.Health.Status}}" betterdb-monitor-postgres 2>/dev/null || echo "none"', - { encoding: 'utf-8' } + 'docker inspect --format="{{.State.Health.Status}}" betterdb-test-postgres 2>/dev/null || echo "none"', + { encoding: 'utf-8' }, ).trim(); - // Valkey has healthcheck, redis doesn't (check if running), postgres has healthcheck const valkeyReady = valkeyHealth === 'healthy'; - const redisReady = redisRunning === 'true'; + const redisReady = redisHealth === 'healthy'; const postgresReady = postgresHealth === 'healthy'; if (valkeyReady && redisReady && postgresReady) { @@ -91,7 +98,7 @@ export default async function globalSetup() { } else { const status = []; if (!valkeyReady) status.push(`valkey: ${valkeyHealth}`); - if (!redisReady) status.push('redis: starting'); + if (!redisReady) status.push(`redis: ${redisHealth}`); if (!postgresReady) status.push(`postgres: ${postgresHealth}`); process.stdout.write(` Waiting... 
${status.join(', ')}\r`); await new Promise((resolve) => setTimeout(resolve, 1000)); @@ -106,13 +113,13 @@ export default async function globalSetup() { throw new Error('Services did not become healthy within timeout'); } - // Additional wait for redis to be fully ready (no healthcheck) + // Additional wait for redis to be fully ready console.log(' Verifying Redis connectivity...'); let redisReady = false; const redisStartTime = Date.now(); while (!redisReady && Date.now() - redisStartTime < 10000) { try { - execSync('docker exec betterdb-monitor-redis redis-cli -a devpassword ping', { + execSync('docker exec betterdb-test-redis redis-cli -a devpassword ping', { stdio: 'ignore', }); redisReady = true; @@ -132,7 +139,7 @@ export default async function globalSetup() { // Show container logs for debugging try { console.log('\nContainer logs:'); - execSync('docker compose -f docker-compose.yml logs --tail=50', { + execSync(`docker compose -p ${PROJECT_NAME} -f ${COMPOSE_FILE} logs --tail=50`, { cwd: projectRoot, stdio: 'inherit', }); diff --git a/apps/api/test/global-teardown.ts b/apps/api/test/global-teardown.ts index b47c0c64..4893bc74 100644 --- a/apps/api/test/global-teardown.ts +++ b/apps/api/test/global-teardown.ts @@ -1,9 +1,12 @@ import { execSync } from 'child_process'; import * as path from 'path'; +const COMPOSE_FILE = 'docker-compose.test.yml'; +const PROJECT_NAME = 'betterdb-test'; + /** - * Global test teardown - stops Docker containers after all tests. - * This ensures clean shutdown and resource cleanup. + * Global test teardown - stops test Docker containers after all tests. + * Only affects test containers (betterdb-test-*), never dev containers. 
*/ export default async function globalTeardown() { const projectRoot = path.resolve(__dirname, '../../..'); @@ -20,18 +23,17 @@ export default async function globalTeardown() { return; } - console.log('\nCleaning up Docker containers...'); + console.log('\nCleaning up test Docker containers...'); try { - // Stop and remove containers - execSync('docker compose -f docker-compose.yml down --remove-orphans', { + execSync(`docker compose -p ${PROJECT_NAME} -f ${COMPOSE_FILE} down --remove-orphans`, { cwd: projectRoot, stdio: 'inherit', }); - console.log('Docker containers stopped and removed\n'); + console.log('Test Docker containers stopped and removed\n'); } catch (error) { - console.error('Failed to stop Docker containers:', error); + console.error('Failed to stop test Docker containers:', error); // Don't throw - we want tests to complete even if cleanup fails } } diff --git a/apps/api/test/setup-env.ts b/apps/api/test/setup-env.ts index d4693a57..4bd53d53 100644 --- a/apps/api/test/setup-env.ts +++ b/apps/api/test/setup-env.ts @@ -5,8 +5,8 @@ if (!process.env.DB_HOST) { } if (!process.env.DB_PORT) { - // Default to Valkey port from docker-compose (6380 mapped to host) - process.env.DB_PORT = process.env.TEST_DB_PORT || '6380'; + // Default to Valkey port from docker-compose.test.yml (6390 mapped to host) + process.env.DB_PORT = process.env.TEST_DB_PORT || '6390'; } if (!process.env.DB_PASSWORD) { diff --git a/apps/web/src/App.tsx b/apps/web/src/App.tsx index e7113cb6..fc4d220f 100644 --- a/apps/web/src/App.tsx +++ b/apps/web/src/App.tsx @@ -28,11 +28,10 @@ import { ClusterDashboard } from './pages/ClusterDashboard'; import { Settings } from './pages/Settings'; import { Webhooks } from './pages/Webhooks'; import { VectorSearch } from './pages/VectorSearch'; -import { ThroughputForecasting } from './pages/ThroughputForecasting'; +import { MetricForecasting } from './pages/MetricForecasting'; import { Members } from './pages/Members'; import { workspaceApi, 
CloudUser } from './api/workspace'; import { Feature } from '@betterdb/shared'; -import { settingsApi } from './api/settings'; function App() { return ( @@ -98,16 +97,9 @@ function AppLayout({ cloudUser }: { cloudUser: CloudUser | null }) { const location = useLocation(); const { hasVectorSearch } = useCapabilities(); const [showFeedback, setShowFeedback] = useState(false); - const [throughputForecastingEnabled, setThroughputForecastingEnabled] = useState(true); useIdleTracker(); useNavigationTracker(); - useEffect(() => { - settingsApi.getSettings() - .then((res) => setThroughputForecastingEnabled(res.settings.throughputForecastingEnabled ?? true)) - .catch(() => {}); - }, [location.pathname]); - return (
-
+
+
+ + onUpdate({ enabled: !settings.enabled })} + /> +
- + - onUpdate({ opsCeiling: e.target.value ? parseInt(e.target.value) : null }) + onUpdate({ ceiling: e.target.value ? parseFloat(e.target.value) : null }) } className="w-full px-3 py-2 border rounded-md" /> @@ -57,12 +80,10 @@ export function SettingsPanel({ value={settings.alertThresholdMs} onChange={(e) => onUpdate({ alertThresholdMs: parseInt(e.target.value) })} className="w-full px-3 py-2 border rounded-md" - disabled={settings.opsCeiling === null} + disabled={settings.ceiling === null} > {ALERT_PRESETS.map((p) => ( - + ))}
diff --git a/apps/web/src/components/pages/metric-forecasting/__tests__/formatters.test.ts b/apps/web/src/components/pages/metric-forecasting/__tests__/formatters.test.ts new file mode 100644 index 00000000..306c3245 --- /dev/null +++ b/apps/web/src/components/pages/metric-forecasting/__tests__/formatters.test.ts @@ -0,0 +1,69 @@ +import { describe, it, expect } from 'vitest'; +import { formatMetricValue, formatGrowthRate, formatTime } from '../formatters'; + +describe('formatMetricValue', () => { + it('formats bytes as GB', () => { + expect(formatMetricValue(2_147_483_648, 'bytes')).toBe('2.0 GB'); + }); + + it('formats bytes as MB', () => { + expect(formatMetricValue(52_428_800, 'bytes')).toBe('50.0 MB'); + }); + + it('formats bytes as KB', () => { + expect(formatMetricValue(2048, 'bytes')).toBe('2.0 KB'); + }); + + it('formats small bytes as B', () => { + expect(formatMetricValue(512, 'bytes')).toBe('512 B'); + }); + + it('formats percent', () => { + expect(formatMetricValue(75.5, 'percent')).toBe('75.5%'); + }); + + it('formats ratio', () => { + expect(formatMetricValue(1.35, 'ratio')).toBe('1.35x'); + }); + + it('formats ops as K', () => { + expect(formatMetricValue(12_345, 'ops')).toBe('12.3K ops/sec'); + }); + + it('formats ops as M', () => { + expect(formatMetricValue(1_500_000, 'ops')).toBe('1.5M ops/sec'); + }); + + it('formats small ops as integer', () => { + expect(formatMetricValue(42, 'ops')).toBe('42 ops/sec'); + }); + + it('formats zero for each type', () => { + expect(formatMetricValue(0, 'bytes')).toBe('0 B'); + expect(formatMetricValue(0, 'percent')).toBe('0.0%'); + expect(formatMetricValue(0, 'ratio')).toBe('0.00x'); + expect(formatMetricValue(0, 'ops')).toBe('0 ops/sec'); + }); +}); + +describe('formatGrowthRate', () => { + it('formats positive growth', () => { + expect(formatGrowthRate(5000, 'ops')).toBe('+5.0K/hr'); + }); + + it('formats negative growth', () => { + expect(formatGrowthRate(-1048576, 'bytes')).toBe('-1.0 MB/hr'); + }); + + 
it('formats zero growth', () => { + expect(formatGrowthRate(0, 'ops')).toBe('+0/hr'); + }); +}); + +describe('formatTime', () => { + it('returns a time string with hours and minutes', () => { + const ts = new Date(2026, 2, 30, 14, 35).getTime(); + const result = formatTime(ts); + expect(result).toMatch(/\d{2}:\d{2}/); + }); +}); diff --git a/apps/web/src/components/pages/metric-forecasting/formatters.ts b/apps/web/src/components/pages/metric-forecasting/formatters.ts new file mode 100644 index 00000000..1e014c14 --- /dev/null +++ b/apps/web/src/components/pages/metric-forecasting/formatters.ts @@ -0,0 +1,59 @@ +const SIZE_GB = 1_073_741_824; + +const SIZE_MB = 1_048_576; + +const SIZE_KB = 1_024; + +function formatNumber(value: number, formatter: 'bytes' | 'percent' | 'ratio' | 'ops'): string { + switch (formatter) { + case 'bytes': + if (value >= SIZE_GB) { + return `${(value / SIZE_GB).toFixed(1)} GB`; + } + if (value >= SIZE_MB) { + return `${(value / SIZE_MB).toFixed(1)} MB`; + } + if (value >= SIZE_KB) { + return `${(value / SIZE_KB).toFixed(1)} KB`; + } + return `${value} B`; + case 'percent': + return `${value.toFixed(1)}`; + case 'ratio': + return `${value.toFixed(2)}`; + case 'ops': + if (value >= 1_000_000) { + return `${(value / 1_000_000).toFixed(1)}M`; + } + if (value >= 1_000) { + return `${(value / 1_000).toFixed(1)}K`; + } + return `${Math.round(value)}`; + } +} + +const UNIT_SUFFIX = { + bytes: '', + percent: '%', + ratio: 'x', + ops: ' ops/sec', +} as const; + +export function formatMetricValue( + value: number, + formatter: 'bytes' | 'percent' | 'ratio' | 'ops', +): string { + return `${formatNumber(value, formatter)}${UNIT_SUFFIX[formatter]}`; +} + +export function formatGrowthRate( + rate: number, + formatter: 'bytes' | 'percent' | 'ratio' | 'ops', +): string { + const sign = rate >= 0 ? 
'+' : '-'; + return `${sign}${formatNumber(Math.abs(rate), formatter)}/hr`; +} + +export function formatTime(timestamp: number): string { + return new Date(timestamp).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' }); +} diff --git a/apps/web/src/components/pages/metric-forecasting/index.ts b/apps/web/src/components/pages/metric-forecasting/index.ts new file mode 100644 index 00000000..a9c7e37d --- /dev/null +++ b/apps/web/src/components/pages/metric-forecasting/index.ts @@ -0,0 +1,7 @@ +export { MetricChart } from './MetricChart'; +export { MetricForecastCard } from './MetricForecastCard'; +export { MetricSettingsPanel } from './MetricSettingsPanel'; +export { MetricDisabled } from './MetricDisabled'; +export { MetricInsufficientData } from './MetricInsufficientData'; +export { MetricLoading } from './MetricLoading'; +export { formatMetricValue, formatTime } from './formatters'; diff --git a/apps/web/src/components/pages/settings/throughput-forecasting-settings-section.tsx b/apps/web/src/components/pages/settings/throughput-forecasting-settings-section.tsx deleted file mode 100644 index b2526294..00000000 --- a/apps/web/src/components/pages/settings/throughput-forecasting-settings-section.tsx +++ /dev/null @@ -1,64 +0,0 @@ -import { Toggle } from '../../ui/toggle'; - -interface ThroughputForecastingSettingsSectionProps { - throughputForecastingEnabled: boolean; - throughputForecastingDefaultRollingWindowMs: number; - throughputForecastingDefaultAlertThresholdMs: number; - onToggleEnabled: () => void; - onRollingWindowChange: (value: number) => void; - onAlertThresholdChange: (value: number) => void; -} - -export function ThroughputForecastingSettingsSection({ - throughputForecastingEnabled, - throughputForecastingDefaultRollingWindowMs, - throughputForecastingDefaultAlertThresholdMs, - onToggleEnabled, - onRollingWindowChange, - onAlertThresholdChange, -}: ThroughputForecastingSettingsSectionProps) { - return ( -
-

Throughput Forecasting

-

- These defaults are applied when throughput forecasting is first activated for a connection. - Per-connection settings can be customized on the Throughput Forecast page. -

- -
-
- - -
-
- - -
- -
- - -
-
-
- ); -} diff --git a/apps/web/src/components/pages/throughput-forecasting/Disabled.tsx b/apps/web/src/components/pages/throughput-forecasting/Disabled.tsx deleted file mode 100644 index 8de97b74..00000000 --- a/apps/web/src/components/pages/throughput-forecasting/Disabled.tsx +++ /dev/null @@ -1,33 +0,0 @@ -import { Card } from '../../ui/card.tsx'; -import { SettingsPanel } from './SettingsPanel.tsx'; -import { ThroughputSettings, ThroughputSettingsUpdate } from '@betterdb/shared'; - -export const Disabled = ({ - updateSetting, - settings, - saveStatus, -}: { - updateSetting: (updates: ThroughputSettingsUpdate) => void; - settings: ThroughputSettings; - saveStatus: 'idle' | 'saved' | 'error'; -}) => { - return ( -
-

Throughput Forecast

- -
-

- Throughput forecasting is disabled for this connection. -

- -
-
- -
- ); -}; diff --git a/apps/web/src/components/pages/throughput-forecasting/ForecastCard.tsx b/apps/web/src/components/pages/throughput-forecasting/ForecastCard.tsx deleted file mode 100644 index 11f03cfc..00000000 --- a/apps/web/src/components/pages/throughput-forecasting/ForecastCard.tsx +++ /dev/null @@ -1,51 +0,0 @@ -import type { ThroughputForecast } from '@betterdb/shared'; -import { Card } from '../../ui/card.tsx'; -import { formatOps } from './utils.ts'; - -export function ForecastCard({ forecast }: { forecast: ThroughputForecast }) { - const directionArrow = - forecast.trendDirection === 'rising' - ? '\u2197' - : forecast.trendDirection === 'falling' - ? '\u2198' - : '\u2192'; - - return ( - -

- {forecast.mode === 'forecast' ? 'Throughput Forecast' : 'Throughput Trend'} -

- - {forecast.mode === 'forecast' && forecast.timeToLimitHuman && ( -

{forecast.timeToLimitHuman}

- )} - -
-
-

Current

-

{formatOps(forecast.currentOpsPerSec)} ops/sec

-
- {forecast.opsCeiling && ( -
-

Ceiling

-

{formatOps(forecast.opsCeiling)} ops/sec

-
- )} -
-

Growth Rate

-

- {forecast.growthRate >= 0 ? '+' : ''} - {formatOps(Math.round(forecast.growthRate))}/hr -

-
-
-

Trend

-

- {directionArrow} {forecast.trendDirection} ({forecast.growthPercent >= 0 ? '+' : ''} - {forecast.growthPercent.toFixed(1)}%) -

-
-
-
- ); -} diff --git a/apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx b/apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx deleted file mode 100644 index d0425e8e..00000000 --- a/apps/web/src/components/pages/throughput-forecasting/InsufficientData.tsx +++ /dev/null @@ -1,14 +0,0 @@ -import { Card } from '../../ui/card.tsx'; -import { formatOps } from './utils.ts'; -import { ThroughputForecast } from '@betterdb/shared'; - -export const InsufficientData = ({ forecast }: { forecast: ThroughputForecast }) => ( - -

{forecast.insufficientDataMessage}

- {forecast.currentOpsPerSec > 0 && ( -

- {formatOps(forecast.currentOpsPerSec)} ops/sec -

- )} -
-); diff --git a/apps/web/src/components/pages/throughput-forecasting/Loading.tsx b/apps/web/src/components/pages/throughput-forecasting/Loading.tsx deleted file mode 100644 index ce55bc7b..00000000 --- a/apps/web/src/components/pages/throughput-forecasting/Loading.tsx +++ /dev/null @@ -1,8 +0,0 @@ -export const Loading = () => { - return ( -
-

Throughput Forecast

-

Loading...

-
- ); -}; diff --git a/apps/web/src/components/pages/throughput-forecasting/index.ts b/apps/web/src/components/pages/throughput-forecasting/index.ts deleted file mode 100644 index 9452f783..00000000 --- a/apps/web/src/components/pages/throughput-forecasting/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -export * from './ThroughputChart'; -export * from './SettingsPanel'; -export * from './Loading'; -export * from './ForecastCard'; -export * from './InsufficientData'; -export * from './Disabled'; -export * from './utils'; diff --git a/apps/web/src/components/pages/throughput-forecasting/utils.ts b/apps/web/src/components/pages/throughput-forecasting/utils.ts deleted file mode 100644 index 9109c26b..00000000 --- a/apps/web/src/components/pages/throughput-forecasting/utils.ts +++ /dev/null @@ -1,24 +0,0 @@ -export const WINDOW_PRESETS = [ - { label: '1h', value: 3600000 }, - { label: '3h', value: 10800000 }, - { label: '6h', value: 21600000 }, - { label: '12h', value: 43200000 }, - { label: '24h', value: 86400000 }, -]; - -export const ALERT_PRESETS = [ - { label: '30m', value: 1800000 }, - { label: '1h', value: 3600000 }, - { label: '2h', value: 7200000 }, - { label: '4h', value: 14400000 }, -]; - -export function formatOps(value: number): string { - if (value >= 1_000_000) return `${(value / 1_000_000).toFixed(1)}M`; - if (value >= 1_000) return `${(value / 1_000).toFixed(1)}K`; - return value.toString(); -} - -export function formatTime(timestamp: number): string { - return new Date(timestamp).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' }); -} diff --git a/apps/web/src/components/webhooks/WebhookForm.tsx b/apps/web/src/components/webhooks/WebhookForm.tsx index 75aade8b..abbd3f93 100644 --- a/apps/web/src/components/webhooks/WebhookForm.tsx +++ b/apps/web/src/components/webhooks/WebhookForm.tsx @@ -64,7 +64,7 @@ const EVENT_LABELS: Record = { 'acl.violation': 'ACL Violation', 'acl.modified': 'ACL Modified', 'config.changed': 'Config Changed', - 
'throughput.limit': 'Throughput Limit', + 'metric_forecast.limit': 'Metric Forecast Limit', }; // Tier display names diff --git a/apps/web/src/pages/MetricForecasting.tsx b/apps/web/src/pages/MetricForecasting.tsx new file mode 100644 index 00000000..26e6ef20 --- /dev/null +++ b/apps/web/src/pages/MetricForecasting.tsx @@ -0,0 +1,155 @@ +import { useRef, useState, useCallback } from 'react'; +import { useQuery, useQueryClient } from '@tanstack/react-query'; +import { useSearchParams } from 'react-router-dom'; +import { useConnection } from '../hooks/useConnection'; +import { metricForecastingApi } from '../api/metric-forecasting'; +import { metricsApi } from '../api/metrics'; +import { METRIC_EXTRACTORS } from './metric-forecasting-extractors'; +import { + METRIC_KIND_META, + ALL_METRIC_KINDS, + type MetricKind, + type MetricForecastSettingsUpdate, +} from '@betterdb/shared'; +import { + MetricForecastCard, + MetricChart, + MetricSettingsPanel, + MetricLoading, + MetricDisabled, + MetricInsufficientData, + formatTime, +} from '../components/pages/metric-forecasting'; + +const TAB_LABELS: Record = { + opsPerSec: 'Throughput', + usedMemory: 'Memory', + cpuTotal: 'CPU', + memFragmentation: 'Fragmentation', +}; + +export function MetricForecasting() { + const { currentConnection } = useConnection(); + const queryClient = useQueryClient(); + const [searchParams, setSearchParams] = useSearchParams(); + const [saveStatus, setSaveStatus] = useState<'idle' | 'saved' | 'error'>('idle'); + const saveTimeout = useRef>(undefined); + const debounceTimeout = useRef>(undefined); + + const connectionId = currentConnection?.id; + const tabParam = searchParams.get('tab'); + const activeTab: MetricKind = + tabParam && ALL_METRIC_KINDS.includes(tabParam as MetricKind) + ? 
(tabParam as MetricKind) + : 'opsPerSec'; + + const setActiveTab = useCallback( + (tab: MetricKind) => { + setSearchParams({ tab }, { replace: true }); + }, + [setSearchParams], + ); + + const meta = METRIC_KIND_META[activeTab]; + + const { data: forecast } = useQuery({ + queryKey: ['metric-forecast', connectionId, activeTab], + queryFn: ({ signal }) => metricForecastingApi.getForecast(activeTab, signal), + enabled: !!connectionId, + refetchInterval: 30_000, + }); + + const { data: settings } = useQuery({ + queryKey: ['metric-forecast-settings', connectionId, activeTab], + queryFn: ({ signal }) => metricForecastingApi.getSettings(activeTab, signal), + enabled: !!connectionId, + }); + + const { data: chartData = [] } = useQuery({ + queryKey: ['metric-forecast-chart', connectionId, activeTab, settings?.rollingWindowMs], + queryFn: async () => { + const now = Date.now(); + const snapshots = await metricsApi.getStoredMemorySnapshots({ + startTime: now - settings!.rollingWindowMs, + limit: 1500, + }); + const extractor = METRIC_EXTRACTORS[activeTab]; + return [...snapshots] + .sort((a, b) => a.timestamp - b.timestamp) + .map((s) => ({ time: s.timestamp, value: extractor(s), label: formatTime(s.timestamp) })); + }, + enabled: !!connectionId && !!settings, + refetchInterval: 30_000, + }); + + const updateSetting = (updates: MetricForecastSettingsUpdate) => { + if (debounceTimeout.current) clearTimeout(debounceTimeout.current); + + queryClient.setQueryData( + ['metric-forecast-settings', connectionId, activeTab], + (prev: typeof settings) => (prev ? 
{ ...prev, ...updates, updatedAt: Date.now() } : prev), + ); + + debounceTimeout.current = setTimeout(async () => { + try { + const updated = await metricForecastingApi.updateSettings(activeTab, updates); + queryClient.setQueryData(['metric-forecast-settings', connectionId, activeTab], updated); + setSaveStatus('saved'); + await queryClient.invalidateQueries({ + queryKey: ['metric-forecast', connectionId, activeTab], + }); + if (saveTimeout.current) clearTimeout(saveTimeout.current); + saveTimeout.current = setTimeout(() => setSaveStatus('idle'), 2000); + } catch { + await queryClient.invalidateQueries({ + queryKey: ['metric-forecast-settings', connectionId, activeTab], + }); + setSaveStatus('error'); + } + }, 500); + }; + + if (!forecast || !settings) return ; + + return ( +
+

Forecast

+ + {/* Tab bar */} +
+ {ALL_METRIC_KINDS.map((kind) => ( + + ))} +
+ + {/* Tab content */} + {!forecast.enabled ? ( + + ) : forecast.insufficientData ? ( + + ) : ( + <> + + + + )} + + +
+ ); +} diff --git a/apps/web/src/pages/Settings.tsx b/apps/web/src/pages/Settings.tsx index f673c827..a086e51a 100644 --- a/apps/web/src/pages/Settings.tsx +++ b/apps/web/src/pages/Settings.tsx @@ -6,9 +6,7 @@ import { useConnection } from '../hooks/useConnection'; import { AppSettings, SettingsUpdateRequest } from '@betterdb/shared'; import { Card } from '../components/ui/card'; import { Badge } from '../components/ui/badge'; -import { ThroughputForecastingSettingsSection } from '../components/pages/settings/throughput-forecasting-settings-section'; - -type SettingsCategory = 'audit' | 'clientAnalytics' | 'anomaly' | 'throughputForecasting' | 'mcpTokens'; +type SettingsCategory = 'audit' | 'clientAnalytics' | 'anomaly' | 'mcpTokens'; export function Settings({ isCloudMode = false }: { isCloudMode?: boolean }) { const { currentConnection } = useConnection(); @@ -155,7 +153,6 @@ export function Settings({ isCloudMode = false }: { isCloudMode?: boolean }) { { id: 'audit', label: 'Audit Trail' }, { id: 'clientAnalytics', label: 'Client Analytics' }, { id: 'anomaly', label: 'Anomaly Detection' }, - { id: 'throughputForecasting', label: 'Throughput Forecasting' }, ...(isCloudMode ? [{ id: 'mcpTokens' as const, label: 'MCP Tokens' }] : []), ]; @@ -268,30 +265,6 @@ export function Settings({ isCloudMode = false }: { isCloudMode?: boolean }) {
)} - {activeCategory === 'throughputForecasting' && ( - - handleInputChange( - 'throughputForecastingEnabled', - !formData.throughputForecastingEnabled, - ) - } - onRollingWindowChange={(value) => - handleInputChange('throughputForecastingDefaultRollingWindowMs', value) - } - onAlertThresholdChange={(value) => - handleInputChange('throughputForecastingDefaultAlertThresholdMs', value) - } - /> - )} - {activeCategory === 'mcpTokens' && (

MCP Tokens

diff --git a/apps/web/src/pages/ThroughputForecasting.tsx b/apps/web/src/pages/ThroughputForecasting.tsx deleted file mode 100644 index 3fa73325..00000000 --- a/apps/web/src/pages/ThroughputForecasting.tsx +++ /dev/null @@ -1,102 +0,0 @@ -import { useRef, useState } from 'react'; -import { useQuery, useQueryClient } from '@tanstack/react-query'; -import { useConnection } from '../hooks/useConnection'; -import { metricsApi } from '../api/metrics'; -import type { ThroughputSettingsUpdate } from '@betterdb/shared'; -import { - ForecastCard, - formatTime, - Loading, - SettingsPanel, - ThroughputChart, - Disabled, - InsufficientData, -} from '../components/pages/throughput-forecasting'; - -export function ThroughputForecasting() { - const { currentConnection } = useConnection(); - const queryClient = useQueryClient(); - const [saveStatus, setSaveStatus] = useState<'idle' | 'saved' | 'error'>('idle'); - const saveTimeout = useRef>(undefined); - const debounceTimeout = useRef>(undefined); - - const connectionId = currentConnection?.id; - - const { data: forecast } = useQuery({ - queryKey: ['throughput-forecast', connectionId], - queryFn: ({ signal }) => metricsApi.getThroughputForecast(signal), - refetchInterval: 30_000, - }); - - const { data: settings } = useQuery({ - queryKey: ['throughput-settings', connectionId], - queryFn: ({ signal }) => metricsApi.getThroughputSettings(signal), - }); - - const { data: chartData = [] } = useQuery({ - queryKey: ['throughput-chart', connectionId, settings?.rollingWindowMs], - queryFn: async () => { - const now = Date.now(); - const snapshots = await metricsApi.getStoredMemorySnapshots({ - startTime: now - settings!.rollingWindowMs, - limit: 1500, - }); - return [...snapshots] - .sort((a, b) => a.timestamp - b.timestamp) - .map((s) => ({ time: s.timestamp, ops: s.opsPerSec, label: formatTime(s.timestamp) })); - }, - enabled: !!settings, - refetchInterval: 30_000, - }); - - const updateSetting = (updates: ThroughputSettingsUpdate) => { 
- if (debounceTimeout.current) clearTimeout(debounceTimeout.current); - - // Optimistic update - queryClient.setQueryData( - ['throughput-settings', connectionId], - (prev: typeof settings) => (prev ? { ...prev, ...updates, updatedAt: Date.now() } : prev), - ); - - debounceTimeout.current = setTimeout(async () => { - try { - const updated = await metricsApi.updateThroughputSettings(updates); - queryClient.setQueryData(['throughput-settings', connectionId], updated); - setSaveStatus('saved'); - await queryClient.invalidateQueries({ queryKey: ['throughput-forecast', connectionId] }); - if (saveTimeout.current) clearTimeout(saveTimeout.current); - saveTimeout.current = setTimeout(() => setSaveStatus('idle'), 2000); - } catch { - // Revert optimistic update - await queryClient.invalidateQueries({ queryKey: ['throughput-settings', connectionId] }); - setSaveStatus('error'); - } - }, 500); - }; - - // ── Page States ── - - if (!forecast || !settings) { - return ; - } - - if (!forecast.enabled) { - return ; - } - - return ( -
-

Throughput Forecast

- - {forecast.insufficientData ? ( - - ) : ( - <> - - - - )} - -
- ); -} diff --git a/apps/web/src/pages/metric-forecasting-extractors.ts b/apps/web/src/pages/metric-forecasting-extractors.ts new file mode 100644 index 00000000..1c070c17 --- /dev/null +++ b/apps/web/src/pages/metric-forecasting-extractors.ts @@ -0,0 +1,11 @@ +import type { MetricKind } from '@betterdb/shared'; +import type { StoredMemorySnapshot } from '../types/metrics'; + +type Extractor = (snapshot: StoredMemorySnapshot) => number; + +export const METRIC_EXTRACTORS: Record = { + opsPerSec: (s) => s.opsPerSec, + usedMemory: (s) => s.usedMemory, + cpuTotal: (s) => (s.cpuSys ?? 0) + (s.cpuUser ?? 0), + memFragmentation: (s) => s.memFragmentationRatio, +}; diff --git a/docker-compose.test.yml b/docker-compose.test.yml new file mode 100644 index 00000000..45cf4197 --- /dev/null +++ b/docker-compose.test.yml @@ -0,0 +1,56 @@ +services: + valkey: + image: valkey/valkey:8-alpine + container_name: betterdb-test-valkey + ports: + - "6390:6379" + command: > + valkey-server + --requirepass devpassword + --slowlog-log-slower-than 0 + --commandlog-request-larger-than 100 + --commandlog-reply-larger-than 100 + --commandlog-slow-execution-max-len 128 + --commandlog-large-request-max-len 128 + --commandlog-large-reply-max-len 128 + healthcheck: + test: ["CMD", "valkey-cli", "ping"] + interval: 5s + timeout: 3s + retries: 3 + + redis: + image: redis:8-alpine + container_name: betterdb-test-redis + ports: + - "6392:6379" + command: > + redis-server + --requirepass devpassword + --appendonly yes + --slowlog-log-slower-than 10000 + --slowlog-max-len 128 + --latency-monitor-threshold 100 + --acllog-max-len 128 + healthcheck: + test: ["CMD", "redis-cli", "-a", "devpassword", "ping"] + interval: 5s + timeout: 3s + retries: 3 + + postgres: + image: postgres:16-alpine + container_name: betterdb-test-postgres + ports: + - "5433:5432" + environment: + POSTGRES_USER: betterdb + POSTGRES_PASSWORD: devpassword + POSTGRES_DB: betterdb + healthcheck: + test: ["CMD-SHELL", "pg_isready -U 
betterdb"] + interval: 5s + timeout: 3s + retries: 3 + tmpfs: + - /var/lib/postgresql/data diff --git a/packages/shared/src/index.ts b/packages/shared/src/index.ts index 61c9c5b9..508c0dc3 100644 --- a/packages/shared/src/index.ts +++ b/packages/shared/src/index.ts @@ -13,4 +13,4 @@ export * from './utils/key-patterns'; export * from './license/index'; export * from './webhooks/index'; export * from './types/vector-index-snapshots'; -export * from './types/throughput.types'; +export * from './types/metric-forecasting.types'; diff --git a/packages/shared/src/types/metric-forecasting.types.ts b/packages/shared/src/types/metric-forecasting.types.ts new file mode 100644 index 00000000..c4d12beb --- /dev/null +++ b/packages/shared/src/types/metric-forecasting.types.ts @@ -0,0 +1,87 @@ +export type MetricKind = 'opsPerSec' | 'usedMemory' | 'cpuTotal' | 'memFragmentation'; + +export interface MetricForecastSettings { + connectionId: string; + metricKind: MetricKind; + enabled: boolean; + ceiling: number | null; + rollingWindowMs: number; + alertThresholdMs: number; + updatedAt: number; +} + +export interface MetricForecast { + connectionId: string; + metricKind: MetricKind; + mode: 'trend' | 'forecast'; + currentValue: number; + growthRate: number; + growthPercent: number; + trendDirection: 'rising' | 'falling' | 'stable'; + dataPointCount: number; + windowMs: number; + ceiling: number | null; + timeToLimitMs: number | null; + timeToLimitHuman: string; + enabled: boolean; + insufficientData: boolean; + insufficientDataMessage?: string; +} + +export interface MetricForecastSettingsUpdate { + enabled?: boolean; + ceiling?: number | null; + rollingWindowMs?: number; + alertThresholdMs?: number; +} + +export interface MetricKindMeta { + label: string; + unit: string; + unitLabel: string; + ceilingLabel: string; + defaultCeiling: number | null; + valueFormatter: 'bytes' | 'percent' | 'ratio' | 'ops'; +} + +export const METRIC_KIND_META: Record = { + opsPerSec: { + label: 
'Ops/sec', + unit: 'ops/sec', + unitLabel: 'ops/sec', + ceilingLabel: 'Ops/sec Ceiling', + defaultCeiling: null, + valueFormatter: 'ops', + }, + usedMemory: { + label: 'Memory', + unit: 'bytes', + unitLabel: 'MB', + ceilingLabel: 'Memory Ceiling', + defaultCeiling: null, + valueFormatter: 'bytes', + }, + cpuTotal: { + label: 'CPU', + unit: 'percent', + unitLabel: '%', + ceilingLabel: 'CPU Ceiling', + defaultCeiling: 100, + valueFormatter: 'percent', + }, + memFragmentation: { + label: 'Fragmentation', + unit: 'ratio', + unitLabel: 'x', + ceilingLabel: 'Fragmentation Ceiling', + defaultCeiling: 1.5, + valueFormatter: 'ratio', + }, +}; + +export const ALL_METRIC_KINDS: MetricKind[] = [ + 'opsPerSec', + 'usedMemory', + 'cpuTotal', + 'memFragmentation', +]; diff --git a/packages/shared/src/types/settings.types.ts b/packages/shared/src/types/settings.types.ts index 91948a78..90bbd71b 100644 --- a/packages/shared/src/types/settings.types.ts +++ b/packages/shared/src/types/settings.types.ts @@ -9,9 +9,9 @@ export interface AppSettings { anomalyCacheTtlMs: number; anomalyPrometheusIntervalMs: number; - throughputForecastingEnabled: boolean; - throughputForecastingDefaultRollingWindowMs: number; - throughputForecastingDefaultAlertThresholdMs: number; + metricForecastingEnabled: boolean; + metricForecastingDefaultRollingWindowMs: number; + metricForecastingDefaultAlertThresholdMs: number; updatedAt: number; createdAt: number; diff --git a/packages/shared/src/types/throughput.types.ts b/packages/shared/src/types/throughput.types.ts deleted file mode 100644 index 7a4f3f26..00000000 --- a/packages/shared/src/types/throughput.types.ts +++ /dev/null @@ -1,32 +0,0 @@ -export interface ThroughputSettings { - connectionId: string; - enabled: boolean; - opsCeiling: number | null; - rollingWindowMs: number; - alertThresholdMs: number; - updatedAt: number; -} - -export interface ThroughputForecast { - connectionId: string; - mode: 'trend' | 'forecast'; - currentOpsPerSec: number; - 
growthRate: number; - growthPercent: number; - trendDirection: 'rising' | 'falling' | 'stable'; - dataPointCount: number; - windowMs: number; - opsCeiling: number | null; - timeToLimitMs: number | null; - timeToLimitHuman: string; - enabled: boolean; - insufficientData: boolean; - insufficientDataMessage?: string; -} - -export interface ThroughputSettingsUpdate { - enabled?: boolean; - opsCeiling?: number | null; - rollingWindowMs?: number; - alertThresholdMs?: number; -} diff --git a/packages/shared/src/webhooks/types.ts b/packages/shared/src/webhooks/types.ts index b3a45aeb..2c053080 100644 --- a/packages/shared/src/webhooks/types.ts +++ b/packages/shared/src/webhooks/types.ts @@ -15,7 +15,7 @@ export enum WebhookEventType { CLUSTER_FAILOVER = 'cluster.failover', AUDIT_POLICY_VIOLATION = 'audit.policy.violation', COMPLIANCE_ALERT = 'compliance.alert', - THROUGHPUT_LIMIT = 'throughput.limit', + METRIC_FORECAST_LIMIT = 'metric_forecast.limit', } // Injection tokens for proprietary webhook services @@ -38,7 +38,7 @@ export const PRO_EVENTS: WebhookEventType[] = [ WebhookEventType.CLUSTER_FAILOVER, WebhookEventType.LATENCY_SPIKE, WebhookEventType.CONNECTION_SPIKE, - WebhookEventType.THROUGHPUT_LIMIT, + WebhookEventType.METRIC_FORECAST_LIMIT, ]; export const ENTERPRISE_EVENTS: WebhookEventType[] = [ @@ -75,7 +75,7 @@ export const WEBHOOK_EVENT_TIERS: Record = { [WebhookEventType.CLUSTER_FAILOVER]: Tier.pro, [WebhookEventType.LATENCY_SPIKE]: Tier.pro, [WebhookEventType.CONNECTION_SPIKE]: Tier.pro, - [WebhookEventType.THROUGHPUT_LIMIT]: Tier.pro, + [WebhookEventType.METRIC_FORECAST_LIMIT]: Tier.pro, // Enterprise tier events [WebhookEventType.AUDIT_POLICY_VIOLATION]: Tier.enterprise, @@ -313,15 +313,17 @@ export interface IWebhookEventsProService { connectionId?: string; }): Promise; - dispatchThroughputLimit(data: { - currentOpsPerSec: number; - opsCeiling: number; + dispatchMetricForecastLimit(data: { + event: string; + metricKind: string; + currentValue: number; + 
ceiling: number | null; timeToLimitMs: number; threshold: number; growthRate: number; timestamp: number; - instance: WebhookInstanceInfo; - connectionId?: string; + instance?: { host: string; port: number }; + connectionId: string; }): Promise; } diff --git a/proprietary/webhook-pro/__tests__/throughput-limit.spec.ts b/proprietary/webhook-pro/__tests__/throughput-limit.spec.ts index f7c14f1c..ba4e20a8 100644 --- a/proprietary/webhook-pro/__tests__/throughput-limit.spec.ts +++ b/proprietary/webhook-pro/__tests__/throughput-limit.spec.ts @@ -4,14 +4,16 @@ import { WebhookDispatcherService } from '@app/webhooks/webhook-dispatcher.servi import { WebhookEventType } from '@betterdb/shared'; import { LicenseService } from '@proprietary/licenses'; -describe('WebhookEventsProService - dispatchThroughputLimit', () => { +describe('WebhookEventsProService - dispatchMetricForecastLimit', () => { let service: WebhookEventsProService; let webhookDispatcher: { dispatchThresholdAlert: jest.Mock }; let licenseService: { getLicenseTier: jest.Mock }; const testData = { - currentOpsPerSec: 50_000, - opsCeiling: 80_000, + event: 'metric_forecast.limit', + metricKind: 'opsPerSec', + currentValue: 50_000, + ceiling: 80_000, timeToLimitMs: 7_200_000, // 2 hours threshold: 7_200_000, growthRate: 10_000, @@ -42,31 +44,34 @@ describe('WebhookEventsProService - dispatchThroughputLimit', () => { // ── Slice 11: Webhook Dispatch (Pro) ── it('11a: dispatches with correct parameters when Pro licensed', async () => { - await service.dispatchThroughputLimit(testData); + await service.dispatchMetricForecastLimit(testData); expect(webhookDispatcher.dispatchThresholdAlert).toHaveBeenCalledTimes(1); const [eventType, alertKey, value, threshold, isAbove] = webhookDispatcher.dispatchThresholdAlert.mock.calls[0]; - expect(eventType).toBe(WebhookEventType.THROUGHPUT_LIMIT); + expect(eventType).toBe(WebhookEventType.METRIC_FORECAST_LIMIT); expect(isAbove).toBe(false); expect(value).toBe(7_200_000); 
expect(threshold).toBe(7_200_000); }); - it('11b: payload contains human-readable message', async () => { - await service.dispatchThroughputLimit(testData); + it('11b: payload contains human-readable message and metric fields', async () => { + await service.dispatchMetricForecastLimit(testData); const payload = webhookDispatcher.dispatchThresholdAlert.mock.calls[0][5]; - expect(payload.message).toContain('~2h'); + expect(payload.message).toContain('~2.0h'); + expect(payload.metricKind).toBe('opsPerSec'); + expect(payload.currentValue).toBe(50_000); + expect(payload.ceiling).toBe(80_000); }); - it('11c: alert key includes connectionId', async () => { - await service.dispatchThroughputLimit(testData); + it('11c: alert key includes connectionId and metricKind', async () => { + await service.dispatchMetricForecastLimit(testData); const alertKey = webhookDispatcher.dispatchThresholdAlert.mock.calls[0][1]; - expect(alertKey).toBe('throughput_limit:conn-42'); + expect(alertKey).toBe('metric_forecast_limit:conn-42:opsPerSec'); }); // ── Slice 12: Webhook Skips (Community) ── @@ -74,7 +79,7 @@ describe('WebhookEventsProService - dispatchThroughputLimit', () => { it('12a: skips dispatch when Community tier', async () => { licenseService.getLicenseTier.mockReturnValue('community'); - await service.dispatchThroughputLimit(testData); + await service.dispatchMetricForecastLimit(testData); expect(webhookDispatcher.dispatchThresholdAlert).not.toHaveBeenCalled(); }); diff --git a/proprietary/webhook-pro/webhook-events-pro.service.ts b/proprietary/webhook-pro/webhook-events-pro.service.ts index 64a60b9d..ccb93b84 100644 --- a/proprietary/webhook-pro/webhook-events-pro.service.ts +++ b/proprietary/webhook-pro/webhook-events-pro.service.ts @@ -31,7 +31,9 @@ export class WebhookEventsProService implements OnModuleInit { if (this.isEnabled()) { this.logger.log('Webhook Pro events service initialized - PRO tier events enabled'); } else { - this.logger.log('Webhook Pro events service 
initialized - PRO tier events disabled (requires license)'); + this.logger.log( + 'Webhook Pro events service initialized - PRO tier events disabled (requires license)', + ); } } @@ -72,7 +74,7 @@ export class WebhookEventsProService implements OnModuleInit { timestamp: data.timestamp, instance: data.instance, }, - data.connectionId + data.connectionId, ); } @@ -107,7 +109,7 @@ export class WebhookEventsProService implements OnModuleInit { timestamp: data.timestamp, instance: data.instance, }, - data.connectionId + data.connectionId, ); } @@ -142,7 +144,7 @@ export class WebhookEventsProService implements OnModuleInit { timestamp: data.timestamp, instance: data.instance, }, - data.connectionId + data.connectionId, ); } @@ -180,7 +182,7 @@ export class WebhookEventsProService implements OnModuleInit { timestamp: data.timestamp, instance: data.instance, }, - data.connectionId + data.connectionId, ); } @@ -215,7 +217,7 @@ export class WebhookEventsProService implements OnModuleInit { timestamp: data.timestamp, instance: data.instance, }, - data.connectionId + data.connectionId, ); } @@ -250,41 +252,47 @@ export class WebhookEventsProService implements OnModuleInit { timestamp: data.timestamp, instance: data.instance, }, - data.connectionId + data.connectionId, ); } /** - * Dispatch throughput limit event (PRO+) + * Dispatch metric forecast limit event (PRO+) * Called when projected time-to-limit drops below configured threshold */ - async dispatchThroughputLimit(data: { - currentOpsPerSec: number; - opsCeiling: number; + async dispatchMetricForecastLimit(data: { + event: string; + metricKind: string; + currentValue: number; + ceiling: number | null; timeToLimitMs: number; threshold: number; growthRate: number; timestamp: number; - instance: { host: string; port: number }; - connectionId?: string; + instance?: { host: string; port: number }; + connectionId: string; }): Promise { if (!this.isEnabled()) { - this.logger.debug('Throughput limit event skipped - requires PRO 
license'); + this.logger.log('Metric forecast limit event skipped - requires PRO license'); return; } + const ceilingLabel = data.ceiling != null ? data.ceiling : 'unknown'; + const timeHours = (data.timeToLimitMs / 3_600_000).toFixed(1); + await this.webhookDispatcher.dispatchThresholdAlert( - WebhookEventType.THROUGHPUT_LIMIT, - `throughput_limit:${data.connectionId || 'default'}`, + WebhookEventType.METRIC_FORECAST_LIMIT, + `metric_forecast_limit:${data.connectionId}:${data.metricKind}`, data.timeToLimitMs, data.threshold, false, // isAbove = false: fire when timeToLimit drops BELOW threshold { - currentOpsPerSec: data.currentOpsPerSec, - opsCeiling: data.opsCeiling, + metricKind: data.metricKind, + currentValue: data.currentValue, + ceiling: data.ceiling, timeToLimitMs: data.timeToLimitMs, growthRate: data.growthRate, - message: `Ops/sec projected to reach ceiling (${data.opsCeiling}) in ~${Math.round(data.timeToLimitMs / 3_600_000)}h`, + message: `${data.metricKind} projected to reach ceiling (${ceilingLabel}) in ~${timeHours}h at current growth rate`, timestamp: data.timestamp, instance: data.instance, }, diff --git a/scripts/demo/throughput-ramp.mjs b/scripts/demo/throughput-ramp.mjs new file mode 100755 index 00000000..4d001eb6 --- /dev/null +++ b/scripts/demo/throughput-ramp.mjs @@ -0,0 +1,264 @@ +#!/usr/bin/env node + +/** + * Simulates a gradually increasing throughput load on Valkey for testing + * the Throughput Forecasting feature. 
+ * + * Usage: + * node scripts/demo/throughput-ramp.mjs [options] + * + * Options: + * --host Valkey host (default: localhost) + * --port Valkey port (default: 6380) + * --auth Password (optional) + * --duration Total duration in minutes (default: 60) + * --start-rps Starting requests per second (default: 100) + * --end-rps Ending requests per second (default: 5000) + * --pattern Load pattern: ramp|spike|wave (default: ramp) + * --grow-keys Write unique keys each tick so memory grows + * --value-size Value size in bytes for --grow-keys (default: 1024) + * --cleanup Remove generated keys on exit + * + * Patterns: + * ramp - Linear increase from start-rps to end-rps over duration + * spike - Steady at start-rps, then sudden jump to end-rps at 75% of duration + * wave - Oscillates between start-rps and end-rps with 10-minute period + */ + +import { createConnection } from 'node:net'; +import { parseArgs } from 'node:util'; + +const { values: opts } = parseArgs({ + options: { + host: { type: 'string', default: 'localhost' }, + port: { type: 'string', default: '6380' }, + auth: { type: 'string', default: 'devpassword' }, + duration: { type: 'string', default: '60' }, + 'start-rps': { type: 'string', default: '100' }, + 'end-rps': { type: 'string', default: '5000' }, + pattern: { type: 'string', default: 'ramp' }, + 'grow-keys': { type: 'boolean', default: false }, + 'value-size': { type: 'string', default: '1024' }, + cleanup: { type: 'boolean', default: false }, + }, + strict: true, +}); + +const HOST = opts.host; +const PORT = parseInt(opts.port); +const AUTH = opts.auth; +const DURATION_MIN = parseInt(opts.duration); +const START_RPS = parseInt(opts['start-rps']); +const END_RPS = parseInt(opts['end-rps']); +const PATTERN = opts.pattern; +const GROW_KEYS = opts['grow-keys']; +const VALUE_SIZE = parseInt(opts['value-size']); +const CLEANUP = opts.cleanup; +const KEY_PREFIX = 'throughput_test'; +const DURATION_MS = DURATION_MIN * 60_000; + +let keyCounter = 0; +let 
valuePayload = ''; +if (GROW_KEYS) { + valuePayload = 'x'.repeat(VALUE_SIZE); +} + +// --- RESP protocol helpers --- + +function encodeCommand(...args) { + let out = `*${args.length}\r\n`; + for (const arg of args) { + const s = String(arg); + out += `$${Buffer.byteLength(s)}\r\n${s}\r\n`; + } + return out; +} + +// --- Pattern functions --- + +function getTargetRps(elapsedMs) { + const progress = Math.min(elapsedMs / DURATION_MS, 1); + + switch (PATTERN) { + case 'ramp': + return Math.round(START_RPS + (END_RPS - START_RPS) * progress); + case 'spike': { + return progress < 0.75 ? START_RPS : END_RPS; + } + case 'wave': { + const mid = (START_RPS + END_RPS) / 2; + const amp = (END_RPS - START_RPS) / 2; + const periodMs = 10 * 60_000; + return Math.round(mid + amp * Math.sin((2 * Math.PI * elapsedMs) / periodMs)); + } + default: + return START_RPS; + } +} + +// --- Connection --- + +function connect() { + return new Promise((resolve, reject) => { + const sock = createConnection({ host: HOST, port: PORT }, () => resolve(sock)); + sock.on('error', reject); + }); +} + +async function authenticate(sock) { + if (!AUTH) return; + return new Promise((resolve, reject) => { + sock.once('data', (data) => { + if (data.toString().startsWith('+OK')) resolve(); + else reject(new Error(`AUTH failed: ${data.toString().trim()}`)); + }); + sock.write(encodeCommand('AUTH', AUTH)); + }); +} + +async function ping(sock) { + return new Promise((resolve, reject) => { + sock.once('data', (data) => { + if (data.toString().includes('PONG')) resolve(); + else reject(new Error(`PING failed: ${data.toString().trim()}`)); + }); + sock.write(encodeCommand('PING')); + }); +} + +// --- Cleanup --- + +async function cleanupKeys(sock) { + if (!CLEANUP) return; + process.stdout.write('\nCleaning up keys...'); + let cursor = '0'; + let deleted = 0; + do { + const resp = await new Promise((resolve) => { + let buf = ''; + const onData = (data) => { + buf += data.toString(); + // Wait for a complete SCAN 
response (heuristic: ends with \r\n after all elements) + if (buf.split('\r\n').length > 3 && buf.endsWith('\r\n')) { + sock.removeListener('data', onData); + resolve(buf); + } + }; + sock.on('data', onData); + sock.write(encodeCommand('SCAN', cursor, 'MATCH', `${KEY_PREFIX}_*`, 'COUNT', '1000')); + }); + const lines = resp.split('\r\n').filter(Boolean); + cursor = lines[1]; + const keys = lines.slice(3).filter((l) => !l.startsWith('*') && !l.startsWith('$')); + if (keys.length > 0) { + sock.write(encodeCommand('DEL', ...keys)); + deleted += keys.length; + // Drain the DEL response + await new Promise((resolve) => { + sock.once('data', () => resolve()); + }); + } + } while (cursor !== '0'); + console.log(` removed ${deleted} keys.`); +} + +// --- Main --- + +console.log('============================================'); +console.log(' Throughput Ramp - Load Generator'); +console.log('============================================'); +console.log(''); +console.log(` Target: ${HOST}:${PORT}`); +console.log(` Pattern: ${PATTERN}`); +console.log(` Duration: ${DURATION_MIN}m`); +console.log(` Start RPS: ${START_RPS}`); +console.log(` End RPS: ${END_RPS}`); +console.log(` Grow keys: ${GROW_KEYS}`); +if (GROW_KEYS) console.log(` Value size: ${VALUE_SIZE}B`); +console.log(` Cleanup: ${CLEANUP}`); +console.log(''); +console.log(' Press Ctrl+C to stop'); +console.log(''); + +const sock = await connect(); +await authenticate(sock); +await ping(sock); + +// Switch socket to flowing mode, discard responses (fire-and-forget) +sock.resume(); + +const startTime = Date.now(); +let opsThisSec = 0; +let lastSecond = Math.floor(Date.now() / 1000); +let tickTimer = null; +let done = false; + +const TICK_MS = 10; // Fire every 10ms, batch commands per tick + +function scheduleNext() { + if (done) return; + + const elapsed = Date.now() - startTime; + if (elapsed >= DURATION_MS) { + finish(); + return; + } + + const targetRps = getTargetRps(elapsed); + const batchSize = Math.max(1, 
Math.round(targetRps * TICK_MS / 1000)); + + // Send a batch of commands + let buf = ''; + for (let i = 0; i < batchSize; i++) { + if (GROW_KEYS) { + const key = `${KEY_PREFIX}_${keyCounter++}`; + buf += encodeCommand('SET', key, valuePayload); + } else { + buf += encodeCommand('PING'); + } + } + sock.write(buf); + opsThisSec += batchSize; + + // Log progress once per second + const nowSec = Math.floor(Date.now() / 1000); + if (nowSec !== lastSecond) { + const elapsedMin = Math.floor(elapsed / 60_000); + const remainingMin = Math.max(0, DURATION_MIN - elapsedMin); + const pct = Math.min(100, Math.round((elapsed / DURATION_MS) * 100)); + const filled = Math.round((pct * 20) / 100); + const bar = '\u2588'.repeat(filled) + '\u2591'.repeat(20 - filled); + + process.stdout.write( + `\r ${bar} ${String(pct).padStart(3)}% | ` + + `${String(elapsedMin).padStart(3)}m/${DURATION_MIN}m | ` + + `${String(opsThisSec).padStart(5)} ops/sec | ${PATTERN} | ${remainingMin}m left `, + ); + opsThisSec = 0; + lastSecond = nowSec; + } + + tickTimer = setTimeout(scheduleNext, TICK_MS); +} + +function finish() { + if (done) return; + done = true; + if (tickTimer) clearTimeout(tickTimer); + + const runtime = Math.round((Date.now() - startTime) / 1000); + console.log('\n'); + console.log('Load generation complete.'); + console.log(`Total runtime: ${runtime}s`); + + (async () => { + await cleanupKeys(sock); + sock.end(); + process.exit(0); + })(); +} + +process.on('SIGINT', finish); +process.on('SIGTERM', finish); + +scheduleNext(); diff --git a/scripts/demo/throughput-ramp.sh b/scripts/demo/throughput-ramp.sh deleted file mode 100755 index d0278882..00000000 --- a/scripts/demo/throughput-ramp.sh +++ /dev/null @@ -1,236 +0,0 @@ -#!/bin/zsh - -# Simulates a gradually increasing throughput load on Valkey for testing -# the Throughput Forecasting feature. 
-# -# Usage: -# ./scripts/demo/throughput-ramp.sh [options] -# -# Options: -# -h, --host Valkey host (default: localhost) -# -p, --port Valkey port (default: 6380) -# -a, --auth Password (default: devpassword) -# -d, --duration Total duration in minutes (default: 60) -# -s, --start-rps Starting requests per second (default: 100) -# -e, --end-rps Ending requests per second (default: 5000) -# --pattern Load pattern: ramp|spike|wave (default: ramp) -# --grow-keys Write unique keys each tick so memory grows over time -# --value-size Value size in bytes for --grow-keys (default: 1024) -# --cleanup Remove generated keys on exit -# -# Patterns: -# ramp - Linear increase from start-rps to end-rps over duration -# spike - Steady at start-rps, then sudden jump to end-rps at 75% of duration -# wave - Oscillates between start-rps and end-rps with 10-minute period - -set -eo pipefail - -HOST="localhost" -PORT="6380" -AUTH="" -DURATION_MIN=60 -START_RPS=100 -END_RPS=5000 -PATTERN="ramp" -GROW_KEYS=false -VALUE_SIZE=1024 -CLEANUP=false -KEY_PREFIX="throughput_test" - -while [[ $# -gt 0 ]]; do - case $1 in - -h|--host) HOST="$2"; shift 2;; - -p|--port) PORT="$2"; shift 2;; - -a|--auth) AUTH="$2"; shift 2;; - -d|--duration) DURATION_MIN="$2"; shift 2;; - -s|--start-rps) START_RPS="$2"; shift 2;; - -e|--end-rps) END_RPS="$2"; shift 2;; - --pattern) PATTERN="$2"; shift 2;; - --grow-keys) GROW_KEYS=true; shift;; - --value-size) VALUE_SIZE="$2"; shift 2;; - --cleanup) CLEANUP=true; shift;; - *) echo "Unknown option: $1"; exit 1;; - esac -done - -USE_DOCKER=false -CLI="" -AUTH_ARGS=() -[[ -n "$AUTH" ]] && AUTH_ARGS=(-a "$AUTH") - -if command -v valkey-cli &> /dev/null; then - CLI="valkey-cli" -elif command -v redis-cli &> /dev/null; then - CLI="redis-cli" -elif docker exec betterdb-monitor-valkey valkey-cli "${AUTH_ARGS[@]}" PING > /dev/null 2>&1; then - USE_DOCKER=true - echo " Using docker exec (no local CLI found)" -else - echo "Error: No valkey-cli, redis-cli, or running Docker 
container found" - exit 1 -fi - -# Build CLI command as an array to preserve argument boundaries -if $USE_DOCKER; then - CLI_CMD=(docker exec -i betterdb-monitor-valkey valkey-cli "${AUTH_ARGS[@]}") - CLI_PIPE=(docker exec -i betterdb-monitor-valkey valkey-cli "${AUTH_ARGS[@]}" --pipe) -else - CLI_CMD=("$CLI" -h "$HOST" -p "$PORT" "${AUTH_ARGS[@]}") - CLI_PIPE=("$CLI" -h "$HOST" -p "$PORT" "${AUTH_ARGS[@]}" --pipe) -fi - -# Verify connection -if ! "${CLI_CMD[@]}" PING > /dev/null 2>&1; then - echo "Error: Cannot connect to Valkey" - exit 1 -fi - -DURATION_SEC=$((DURATION_MIN * 60)) -TICK_SEC=1 # Adjust load every 1 second for smoother throughput -TOTAL_TICKS=$((DURATION_SEC / TICK_SEC)) -KEY_COUNTER=0 - -cleanup() { - echo "" - echo "Stopping load generation..." - - if $CLEANUP; then - echo "Cleaning up keys..." - cursor=0 - while true; do - result=$("${CLI_CMD[@]}" SCAN "$cursor" MATCH "${KEY_PREFIX}_*" COUNT 1000 2>/dev/null) - cursor=$(echo "$result" | head -1) - keys=$(echo "$result" | tail -n +2) - if [[ -n "$keys" ]]; then - echo "$keys" | xargs "${CLI_CMD[@]}" DEL > /dev/null 2>&1 - fi - [[ "$cursor" == "0" ]] && break - done - echo "Cleanup complete." 
- fi -} - -trap cleanup EXIT INT TERM - -get_target_rps() { - local tick=$1 - - case $PATTERN in - ramp) - # Linear interpolation from START_RPS to END_RPS - local progress - progress=$(echo "scale=4; $tick / $TOTAL_TICKS" | bc) - echo "scale=0; $START_RPS + ($END_RPS - $START_RPS) * $progress / 1" | bc - ;; - spike) - # Steady at START_RPS, jump to END_RPS at 75% duration - local threshold=$((TOTAL_TICKS * 3 / 4)) - if [[ $tick -lt $threshold ]]; then - echo "$START_RPS" - else - echo "$END_RPS" - fi - ;; - wave) - # Sinusoidal oscillation with 10-minute period - local mid=$(( (START_RPS + END_RPS) / 2 )) - local amp=$(( (END_RPS - START_RPS) / 2 )) - local period_ticks=$((600 / TICK_SEC)) # 10 min period - local angle - angle=$(echo "scale=6; 3.14159 * 2 * $tick / $period_ticks" | bc) - local sin_val - sin_val=$(echo "scale=6; s($angle)" | bc -l) - echo "scale=0; $mid + $amp * $sin_val / 1" | bc - ;; - *) - echo "$START_RPS" - ;; - esac -} - -# Pre-generate the value payload for --grow-keys mode -if $GROW_KEYS; then - VALUE_PAYLOAD=$(head -c "$VALUE_SIZE" < /dev/zero | tr '\0' 'x') -fi - -# Generate load at target RPS for one tick (TICK_SEC seconds) -# Spreads commands evenly across the tick in small batches (every 100ms) -# so Valkey's instantaneous_ops_per_sec rolling average stays accurate. 
-run_tick() { - local target_rps=$1 - local total_ops=$((target_rps * TICK_SEC)) - if ((total_ops < 1)); then total_ops=1; fi - - # Split into 10 batches per second (every 100ms) so the load is steady - local batches_per_sec=10 - local batch_count=$((TICK_SEC * batches_per_sec)) - local ops_per_batch=$(( (total_ops + batch_count - 1) / batch_count )) - local delay=$(printf "%.3f" $(echo "scale=3; 1.0 / $batches_per_sec" | bc)) - - local sent=0 - for ((b = 0; b < batch_count && sent < total_ops; b++)); do - local this_batch=$ops_per_batch - ((sent + this_batch > total_ops)) && this_batch=$((total_ops - sent)) - - local batch="" - if $GROW_KEYS; then - local val_len=${#VALUE_PAYLOAD} - for ((i = 0; i < this_batch; i++)); do - local key="${KEY_PREFIX}_${KEY_COUNTER}" - KEY_COUNTER=$((KEY_COUNTER + 1)) - batch+="*3\r\n\$3\r\nSET\r\n\$${#key}\r\n${key}\r\n\$${val_len}\r\n${VALUE_PAYLOAD}\r\n" - done - else - for ((i = 0; i < this_batch; i++)); do - batch+="*1\r\n\$4\r\nPING\r\n" - done - fi - printf "$batch" | "${CLI_PIPE[@]}" > /dev/null 2>&1 - sent=$((sent + this_batch)) - - sleep "$delay" - done -} - -echo "============================================" -echo " Throughput Ramp - Load Generator" -echo "============================================" -echo "" -echo " Target: $HOST:$PORT" -echo " Pattern: $PATTERN" -echo " Duration: ${DURATION_MIN}m" -echo " Start RPS: $START_RPS" -echo " End RPS: $END_RPS" -echo " Grow keys: $GROW_KEYS" -if $GROW_KEYS; then -echo " Value size: ${VALUE_SIZE}B" -fi -echo " Cleanup: $CLEANUP" -echo "" -echo " Press Ctrl+C to stop" -echo "" - -START_TIME=$(date +%s) - -for ((tick = 0; tick < TOTAL_TICKS; tick++)); do - target_rps=$(get_target_rps $tick) - elapsed_min=$(( (tick * TICK_SEC) / 60 )) - remaining_min=$(( (DURATION_SEC - tick * TICK_SEC) / 60 )) - - # Progress bar - local pct=$((tick * 100 / TOTAL_TICKS)) - local bar_len=20 - local filled=$((pct * bar_len / 100)) - local empty=$((bar_len - filled)) - local bar=$(printf '%0.s█' 
$(seq 1 $filled 2>/dev/null))$(printf '%0.s░' $(seq 1 $empty 2>/dev/null)) - - printf "\r %s %3d%% | %3dm/%dm | %5d ops/sec | %s | %dm left " \ - "$bar" "$pct" "$elapsed_min" "$DURATION_MIN" "$target_rps" "$PATTERN" "$remaining_min" - - run_tick "$target_rps" -done - -echo "" -echo "" -echo "Load generation complete." -echo "Total runtime: $(( $(date +%s) - START_TIME ))s" From f469f8b142f4a8cabbf0624461060d7952f79d7f Mon Sep 17 00:00:00 2001 From: jamby77 Date: Tue, 31 Mar 2026 14:57:16 +0300 Subject: [PATCH 08/20] bugfix: import WebhookEventType as value from @betterdb/shared The storage-port interface re-exports it as 'export type', which cannot be used as a runtime value. Import directly from shared. --- .../src/metric-forecasting/metric-forecasting.service.ts | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/apps/api/src/metric-forecasting/metric-forecasting.service.ts b/apps/api/src/metric-forecasting/metric-forecasting.service.ts index 46aa06ee..b172c8b6 100644 --- a/apps/api/src/metric-forecasting/metric-forecasting.service.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.service.ts @@ -6,7 +6,7 @@ import { OnModuleInit, Optional, } from '@nestjs/common'; -import { StoragePort, WebhookEventType } from '../common/interfaces/storage-port.interface'; +import { StoragePort } from '../common/interfaces/storage-port.interface'; import { SettingsService } from '../settings/settings.service'; import { ConnectionRegistry } from '../connections/connection-registry.service'; import type { @@ -15,7 +15,11 @@ import type { MetricForecastSettingsUpdate, MetricKind, } from '@betterdb/shared'; -import { WEBHOOK_EVENTS_PRO_SERVICE, type IWebhookEventsProService } from '@betterdb/shared'; +import { + WEBHOOK_EVENTS_PRO_SERVICE, + WebhookEventType, + type IWebhookEventsProService, +} from '@betterdb/shared'; import { METRIC_EXTRACTORS } from './metric-extractors'; import { CEILING_RESOLVERS } from './ceiling-resolvers'; From 
a1e99dd529f967468608e46fcbbd5364bf1c760a Mon Sep 17 00:00:00 2001 From: jamby77 Date: Tue, 31 Mar 2026 15:02:34 +0300 Subject: [PATCH 09/20] bugfix: update e2e test ports to match docker-compose.test.yml E2e specs hardcoded old dev ports (6380/6382) instead of test ports (6390/6392). Also update CI workflow to reference test container names. --- .github/workflows/api-tests.yml | 6 +++--- apps/api/test/database-compatibility.e2e-spec.ts | 4 ++-- apps/api/test/valkey-features.e2e-spec.ts | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml index 88843d1b..3862b9f5 100644 --- a/.github/workflows/api-tests.yml +++ b/.github/workflows/api-tests.yml @@ -52,8 +52,8 @@ jobs: if: failure() run: | echo "=== Valkey Logs ===" - docker logs betterdb-monitor-valkey 2>&1 | tail -50 || echo "Container not found" + docker logs betterdb-test-valkey 2>&1 | tail -50 || echo "Container not found" echo "=== Redis Logs ===" - docker logs betterdb-monitor-redis 2>&1 | tail -50 || echo "Container not found" + docker logs betterdb-test-redis 2>&1 | tail -50 || echo "Container not found" echo "=== PostgreSQL Logs ===" - docker logs betterdb-monitor-postgres 2>&1 | tail -50 || echo "Container not found" \ No newline at end of file + docker logs betterdb-test-postgres 2>&1 | tail -50 || echo "Container not found" \ No newline at end of file diff --git a/apps/api/test/database-compatibility.e2e-spec.ts b/apps/api/test/database-compatibility.e2e-spec.ts index 93728d49..61dd03a5 100644 --- a/apps/api/test/database-compatibility.e2e-spec.ts +++ b/apps/api/test/database-compatibility.e2e-spec.ts @@ -6,10 +6,10 @@ describe('Database Compatibility (E2E)', () => { // Common tests that work with both Redis and Valkey // Use TEST_DB_HOST and TEST_DB_PORT to override defaults - // Default to Redis on 6382 (from docker-compose.yml) + // Default to Redis on 6392 (from docker-compose.test.yml) const DB_CONFIG = { host: 
process.env.TEST_DB_HOST || 'localhost', - port: parseInt(process.env.TEST_DB_PORT || '6382', 10), + port: parseInt(process.env.TEST_DB_PORT || '6392', 10), username: process.env.TEST_DB_USERNAME || 'default', password: process.env.TEST_DB_PASSWORD || 'devpassword', }; diff --git a/apps/api/test/valkey-features.e2e-spec.ts b/apps/api/test/valkey-features.e2e-spec.ts index c1df93c6..620382cb 100644 --- a/apps/api/test/valkey-features.e2e-spec.ts +++ b/apps/api/test/valkey-features.e2e-spec.ts @@ -6,10 +6,10 @@ describe('Valkey-Specific Features (E2E)', () => { // These tests are specific to Valkey and should only run against Valkey instances // Use TEST_DB_HOST and TEST_DB_PORT to override defaults - // Default to Valkey on 6380 (from docker-compose.yml) + // Default to Valkey on 6390 (from docker-compose.test.yml) const DB_CONFIG = { host: process.env.TEST_DB_HOST || 'localhost', - port: parseInt(process.env.TEST_DB_PORT || '6380', 10), + port: parseInt(process.env.TEST_DB_PORT || '6390', 10), username: process.env.TEST_DB_USERNAME || 'default', password: process.env.TEST_DB_PASSWORD || 'devpassword', }; From 8b449d6398e05a70fcfd02f8884f899b6e7a0048 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Tue, 31 Mar 2026 15:06:23 +0300 Subject: [PATCH 10/20] bugfix: address PR review feedback for metric forecasting - Fix catastrophic floating-point cancellation in linear regression by normalizing timestamps before accumulation - Add validation DTO for settings update endpoint with bounds checks - Replace Prometheus -1 sentinel with Infinity and remove stale gauges - Downgrade checkAlerts per-iteration log from INFO to DEBUG - Add NaN/negative guard on frontend ceiling input --- .../update-metric-forecast-settings.dto.ts | 25 +++++++++++++++++++ .../metric-forecasting.controller.ts | 4 +-- .../metric-forecasting.service.ts | 17 +++++++++---- apps/api/src/prometheus/prometheus.service.ts | 15 ++++++++--- .../MetricSettingsPanel.tsx | 12 ++++++--- 5 files changed, 60 
insertions(+), 13 deletions(-) create mode 100644 apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts diff --git a/apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts b/apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts new file mode 100644 index 00000000..7bfe124a --- /dev/null +++ b/apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts @@ -0,0 +1,25 @@ +import { IsBoolean, IsNumber, IsOptional, Min, Max, ValidateIf } from 'class-validator'; + +export class UpdateMetricForecastSettingsDto { + @IsOptional() + @IsBoolean() + enabled?: boolean; + + @IsOptional() + @ValidateIf((_obj, value) => value !== null) + @IsNumber() + @Min(0) + ceiling?: number | null; + + @IsOptional() + @IsNumber() + @Min(60_000) + @Max(86_400_000) + rollingWindowMs?: number; + + @IsOptional() + @IsNumber() + @Min(60_000) + @Max(86_400_000) + alertThresholdMs?: number; +} diff --git a/apps/api/src/metric-forecasting/metric-forecasting.controller.ts b/apps/api/src/metric-forecasting/metric-forecasting.controller.ts index 382881eb..974af2ff 100644 --- a/apps/api/src/metric-forecasting/metric-forecasting.controller.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.controller.ts @@ -1,11 +1,11 @@ import { Controller, Get, Put, Body, Param } from '@nestjs/common'; import { MetricForecastingService } from './metric-forecasting.service'; import { MetricKindValidationPipe } from './pipes/metric-kind-validation.pipe'; +import { UpdateMetricForecastSettingsDto } from './dto/update-metric-forecast-settings.dto'; import { ConnectionId } from '../common/decorators/connection-id.decorator'; import type { MetricForecast, MetricForecastSettings, - MetricForecastSettingsUpdate, MetricKind, } from '@betterdb/shared'; @@ -33,7 +33,7 @@ export class MetricForecastingController { async updateSettings( @Param('metricKind', MetricKindValidationPipe) metricKind: MetricKind, @ConnectionId() connectionId?: string, - 
@Body() updates?: MetricForecastSettingsUpdate, + @Body() updates?: UpdateMetricForecastSettingsDto, ): Promise { return this.service.updateSettings(connectionId || 'env-default', metricKind, updates || {}); } diff --git a/apps/api/src/metric-forecasting/metric-forecasting.service.ts b/apps/api/src/metric-forecasting/metric-forecasting.service.ts index b172c8b6..759016e6 100644 --- a/apps/api/src/metric-forecasting/metric-forecasting.service.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.service.ts @@ -254,20 +254,27 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { if (n === 0) return { slope: 0, intercept: 0 }; if (n === 1) return { slope: 0, intercept: points[0].y }; + // Normalize x values to avoid catastrophic floating-point cancellation + // when x values are large epoch timestamps (~1.7e12). + const x0 = points[0].x; + let sumX = 0, sumY = 0, sumXY = 0, sumX2 = 0; for (const p of points) { - sumX += p.x; + const xNorm = p.x - x0; + sumX += xNorm; sumY += p.y; - sumXY += p.x * p.y; - sumX2 += p.x * p.x; + sumXY += xNorm * p.y; + sumX2 += xNorm * xNorm; } const denom = n * sumX2 - sumX * sumX; if (denom === 0) return { slope: 0, intercept: sumY / n }; const slope = (n * sumXY - sumX * sumY) / denom; - const intercept = (sumY - slope * sumX) / n; + // Compute intercept in normalized space, then adjust back to original timestamps + const interceptNorm = (sumY - slope * sumX) / n; + const intercept = interceptNorm - slope * x0; return { slope, intercept }; } @@ -339,7 +346,7 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { for (const settings of activeSettings) { try { const forecast = await this.getForecast(settings.connectionId, settings.metricKind); - this.logger.log( + this.logger.debug( `[checkAlerts] ${WebhookEventType.METRIC_FORECAST_LIMIT} ${settings.connectionId}:${settings.metricKind} — ` + `current=${forecast.currentValue}, ceiling=${forecast.ceiling}, ` + 
`timeToLimit=${forecast.timeToLimitMs}, threshold=${settings.alertThresholdMs}, ` + diff --git a/apps/api/src/prometheus/prometheus.service.ts b/apps/api/src/prometheus/prometheus.service.ts index 4a88dbf1..0560a07f 100644 --- a/apps/api/src/prometheus/prometheus.service.ts +++ b/apps/api/src/prometheus/prometheus.service.ts @@ -598,11 +598,20 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule for (const metricKind of ALL_METRIC_KINDS) { try { const settings = await this.storage.getMetricForecastSettings(connectionId, metricKind); - if (!settings || !settings.enabled) continue; + if (!settings || !settings.enabled) { + this.metricForecastTimeToLimitSeconds.remove(connLabel, metricKind); + continue; + } const forecast = await this.metricForecastingService.getForecast(connectionId, metricKind); - if (forecast.ceiling !== null && !forecast.insufficientData && forecast.enabled) { - const value = forecast.timeToLimitMs !== null ? forecast.timeToLimitMs / 1000 : -1; + if (forecast.ceiling === null || !forecast.enabled) { + this.metricForecastTimeToLimitSeconds.remove(connLabel, metricKind); + continue; + } + + if (!forecast.insufficientData) { + const value = + forecast.timeToLimitMs !== null ? forecast.timeToLimitMs / 1000 : Infinity; this.metricForecastTimeToLimitSeconds.labels(connLabel, metricKind).set(value); } } catch { diff --git a/apps/web/src/components/pages/metric-forecasting/MetricSettingsPanel.tsx b/apps/web/src/components/pages/metric-forecasting/MetricSettingsPanel.tsx index 8aa5970a..2a770964 100644 --- a/apps/web/src/components/pages/metric-forecasting/MetricSettingsPanel.tsx +++ b/apps/web/src/components/pages/metric-forecasting/MetricSettingsPanel.tsx @@ -65,9 +65,15 @@ export function MetricSettingsPanel({ step={meta.valueFormatter === 'ratio' ? '0.1' : '1'} value={settings.ceiling ?? ''} placeholder={meta.defaultCeiling !== null ? 
String(meta.defaultCeiling) : 'No ceiling'} - onChange={(e) => - onUpdate({ ceiling: e.target.value ? parseFloat(e.target.value) : null }) - } + onChange={(e) => { + const raw = e.target.value; + if (!raw) { + onUpdate({ ceiling: null }); + return; + } + const parsed = parseFloat(raw); + onUpdate({ ceiling: isNaN(parsed) || parsed <= 0 ? null : parsed }); + }} className="w-full px-3 py-2 border rounded-md" />
From 18f29042ccb46e8d3078ff5b05fe2165c664cab2 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Tue, 31 Mar 2026 15:37:19 +0300 Subject: [PATCH 11/20] bugfix: persist default ceilings, log prometheus errors, cleanup timers - Use METRIC_KIND_META.defaultCeiling when creating new settings so cpuTotal (100) and memFragmentation (1.5) are persisted and included in alert checks - Replace bare catch with debug-level logging in prometheus scrape - Clear debounce/save timeouts on component unmount --- .../src/metric-forecasting/metric-forecasting.service.ts | 3 ++- apps/api/src/prometheus/prometheus.service.ts | 6 ++++-- apps/web/src/pages/MetricForecasting.tsx | 9 ++++++++- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/apps/api/src/metric-forecasting/metric-forecasting.service.ts b/apps/api/src/metric-forecasting/metric-forecasting.service.ts index 759016e6..0e3a79ff 100644 --- a/apps/api/src/metric-forecasting/metric-forecasting.service.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.service.ts @@ -16,6 +16,7 @@ import type { MetricKind, } from '@betterdb/shared'; import { + METRIC_KIND_META, WEBHOOK_EVENTS_PRO_SERVICE, WebhookEventType, type IWebhookEventsProService, @@ -238,7 +239,7 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { connectionId, metricKind, enabled: true, - ceiling: null, + ceiling: METRIC_KIND_META[metricKind].defaultCeiling, rollingWindowMs: globalSettings.metricForecastingDefaultRollingWindowMs, alertThresholdMs: globalSettings.metricForecastingDefaultAlertThresholdMs, updatedAt: Date.now(), diff --git a/apps/api/src/prometheus/prometheus.service.ts b/apps/api/src/prometheus/prometheus.service.ts index 0560a07f..7f7940df 100644 --- a/apps/api/src/prometheus/prometheus.service.ts +++ b/apps/api/src/prometheus/prometheus.service.ts @@ -614,8 +614,10 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule forecast.timeToLimitMs !== null ? 
forecast.timeToLimitMs / 1000 : Infinity; this.metricForecastTimeToLimitSeconds.labels(connLabel, metricKind).set(value); } - } catch { - // Silently skip if forecasting unavailable for this metric kind + } catch (err) { + this.logger.debug( + `Metric forecast scrape skipped for ${connectionId}:${metricKind}: ${err instanceof Error ? err.message : String(err)}`, + ); } } } diff --git a/apps/web/src/pages/MetricForecasting.tsx b/apps/web/src/pages/MetricForecasting.tsx index 26e6ef20..765d4c02 100644 --- a/apps/web/src/pages/MetricForecasting.tsx +++ b/apps/web/src/pages/MetricForecasting.tsx @@ -1,4 +1,4 @@ -import { useRef, useState, useCallback } from 'react'; +import { useCallback, useEffect, useRef, useState } from 'react'; import { useQuery, useQueryClient } from '@tanstack/react-query'; import { useSearchParams } from 'react-router-dom'; import { useConnection } from '../hooks/useConnection'; @@ -36,6 +36,13 @@ export function MetricForecasting() { const saveTimeout = useRef>(undefined); const debounceTimeout = useRef>(undefined); + useEffect(() => { + return () => { + if (debounceTimeout.current) clearTimeout(debounceTimeout.current); + if (saveTimeout.current) clearTimeout(saveTimeout.current); + }; + }, []); + const connectionId = currentConnection?.id; const tabParam = searchParams.get('tab'); const activeTab: MetricKind = From 67205e5ff85f89a04358079139885818eb3ec7b3 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Tue, 31 Mar 2026 16:09:45 +0300 Subject: [PATCH 12/20] bugfix: address second round of PR review feedback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Replace Infinity with Number.MAX_SAFE_INTEGER for JSON safety - Skip dispatch when forecasting disabled or insufficient data - Skip dispatch when resolved ceiling is null - Remove ceiling IS NOT NULL filter from all storage adapters so usedMemory auto-detected and default ceilings trigger alerts - Change ceiling DTO validation from Min(0) to Min(1) - 
Make ALL_METRIC_KINDS readonly - Add forecast cache pruning in checkAlerts - Fix formatGrowthRate missing % and x suffixes - Use Map for O(1) chart data lookups instead of O(n²) find - Suppress PaymentRequiredError retries in usePolling - Fix loading stuck true when polling disabled --- .../__tests__/metric-forecasting.service.spec.ts | 5 +++-- .../dto/update-metric-forecast-settings.dto.ts | 2 +- .../metric-forecasting.service.ts | 16 +++++++++++++++- apps/api/src/storage/adapters/memory.adapter.ts | 2 +- .../api/src/storage/adapters/postgres.adapter.ts | 2 +- apps/api/src/storage/adapters/sqlite.adapter.ts | 2 +- .../pages/metric-forecasting/MetricChart.tsx | 7 +++++-- .../__tests__/formatters.test.ts | 16 ++++++++++++++-- .../pages/metric-forecasting/formatters.ts | 2 +- apps/web/src/hooks/usePolling.ts | 6 +++++- .../shared/src/types/metric-forecasting.types.ts | 2 +- 11 files changed, 48 insertions(+), 14 deletions(-) diff --git a/apps/api/src/metric-forecasting/__tests__/metric-forecasting.service.spec.ts b/apps/api/src/metric-forecasting/__tests__/metric-forecasting.service.spec.ts index fb39b618..f0dfaac2 100644 --- a/apps/api/src/metric-forecasting/__tests__/metric-forecasting.service.spec.ts +++ b/apps/api/src/metric-forecasting/__tests__/metric-forecasting.service.spec.ts @@ -153,8 +153,9 @@ describe('MetricForecastingService', () => { makeSettings({ connectionId: 'c', metricKind: 'cpuTotal', enabled: false, ceiling: 80 }), ); const active = await storage.getActiveMetricForecastSettings(); - expect(active).toHaveLength(1); - expect(active[0].connectionId).toBe('a'); + expect(active).toHaveLength(2); + const ids = active.map((s) => s.connectionId).sort(); + expect(ids).toEqual(['a', 'b']); }); }); diff --git a/apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts b/apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts index 7bfe124a..ab012a76 100644 --- 
a/apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts +++ b/apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts @@ -8,7 +8,7 @@ export class UpdateMetricForecastSettingsDto { @IsOptional() @ValidateIf((_obj, value) => value !== null) @IsNumber() - @Min(0) + @Min(1) ceiling?: number | null; @IsOptional() diff --git a/apps/api/src/metric-forecasting/metric-forecasting.service.ts b/apps/api/src/metric-forecasting/metric-forecasting.service.ts index 0e3a79ff..a779abc6 100644 --- a/apps/api/src/metric-forecasting/metric-forecasting.service.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.service.ts @@ -336,17 +336,31 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { }; } + private pruneCache(): void { + const maxAge = CACHE_TTL_MS * 2; + const now = Date.now(); + for (const [key, entry] of this.forecastCache) { + if (now - entry.computedAt > maxAge) { + this.forecastCache.delete(key); + } + } + } + private async checkAlerts(): Promise { if (!this.webhookEventsProService) { this.logger.warn('WebhookEventsProService not initialized'); return; } + this.pruneCache(); + try { const activeSettings = await this.storage.getActiveMetricForecastSettings(); for (const settings of activeSettings) { try { const forecast = await this.getForecast(settings.connectionId, settings.metricKind); + if (!forecast.enabled || forecast.insufficientData) continue; + if (forecast.ceiling === null) continue; this.logger.debug( `[checkAlerts] ${WebhookEventType.METRIC_FORECAST_LIMIT} ${settings.connectionId}:${settings.metricKind} — ` + `current=${forecast.currentValue}, ceiling=${forecast.ceiling}, ` + @@ -359,7 +373,7 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { metricKind: settings.metricKind, currentValue: forecast.currentValue, ceiling: forecast.ceiling, - timeToLimitMs: forecast.timeToLimitMs ?? Infinity, + timeToLimitMs: forecast.timeToLimitMs ?? 
Number.MAX_SAFE_INTEGER, threshold: settings.alertThresholdMs, growthRate: forecast.growthRate, timestamp: Date.now(), diff --git a/apps/api/src/storage/adapters/memory.adapter.ts b/apps/api/src/storage/adapters/memory.adapter.ts index fe2ef838..8105eef7 100644 --- a/apps/api/src/storage/adapters/memory.adapter.ts +++ b/apps/api/src/storage/adapters/memory.adapter.ts @@ -1385,7 +1385,7 @@ export class MemoryAdapter implements StoragePort { async getActiveMetricForecastSettings(): Promise { return [...this.metricForecastSettings.values()].filter( - (s) => s.enabled && s.ceiling !== null, + (s) => s.enabled, ); } } diff --git a/apps/api/src/storage/adapters/postgres.adapter.ts b/apps/api/src/storage/adapters/postgres.adapter.ts index 0298955b..835836de 100644 --- a/apps/api/src/storage/adapters/postgres.adapter.ts +++ b/apps/api/src/storage/adapters/postgres.adapter.ts @@ -3578,7 +3578,7 @@ export class PostgresAdapter implements StoragePort { async getActiveMetricForecastSettings(): Promise { if (!this.pool) throw new Error('Database not initialized'); const result = await this.pool.query( - 'SELECT * FROM metric_forecast_settings WHERE enabled = true AND ceiling IS NOT NULL', + 'SELECT * FROM metric_forecast_settings WHERE enabled = true', ); return result.rows.map((row: any) => this.mapMetricForecastRow(row)); } diff --git a/apps/api/src/storage/adapters/sqlite.adapter.ts b/apps/api/src/storage/adapters/sqlite.adapter.ts index 9a247f6f..b5f0515f 100644 --- a/apps/api/src/storage/adapters/sqlite.adapter.ts +++ b/apps/api/src/storage/adapters/sqlite.adapter.ts @@ -3338,7 +3338,7 @@ export class SqliteAdapter implements StoragePort { if (!this.db) throw new Error('Database not initialized'); const rows = this.db .prepare( - 'SELECT * FROM metric_forecast_settings WHERE enabled = 1 AND ceiling IS NOT NULL', + 'SELECT * FROM metric_forecast_settings WHERE enabled = 1', ) .all() as MetricForecastSettingsRow[]; return rows.map((row) => this.mapMetricForecastRow(row)); 
diff --git a/apps/web/src/components/pages/metric-forecasting/MetricChart.tsx b/apps/web/src/components/pages/metric-forecasting/MetricChart.tsx index 7baed193..88e4ae09 100644 --- a/apps/web/src/components/pages/metric-forecasting/MetricChart.tsx +++ b/apps/web/src/components/pages/metric-forecasting/MetricChart.tsx @@ -57,12 +57,15 @@ export function MetricChart({ } } + const chartDataByTime = new Map(chartData.map((d) => [d.time, d])); + const trendDataByTime = new Map(trendData.map((d) => [d.time, d])); + const allTimes = new Set([...chartData.map((d) => d.time), ...trendData.map((d) => d.time)]); return [...allTimes] .sort((a, b) => a - b) .map((t) => { - const dp = chartData.find((d) => d.time === t); - const tp = trendData.find((d) => d.time === t); + const dp = chartDataByTime.get(t); + const tp = trendDataByTime.get(t); return { time: t, label: formatTime(t), diff --git a/apps/web/src/components/pages/metric-forecasting/__tests__/formatters.test.ts b/apps/web/src/components/pages/metric-forecasting/__tests__/formatters.test.ts index 306c3245..dffa699b 100644 --- a/apps/web/src/components/pages/metric-forecasting/__tests__/formatters.test.ts +++ b/apps/web/src/components/pages/metric-forecasting/__tests__/formatters.test.ts @@ -48,7 +48,7 @@ describe('formatMetricValue', () => { describe('formatGrowthRate', () => { it('formats positive growth', () => { - expect(formatGrowthRate(5000, 'ops')).toBe('+5.0K/hr'); + expect(formatGrowthRate(5000, 'ops')).toBe('+5.0K ops/sec/hr'); }); it('formats negative growth', () => { @@ -56,7 +56,19 @@ describe('formatGrowthRate', () => { }); it('formats zero growth', () => { - expect(formatGrowthRate(0, 'ops')).toBe('+0/hr'); + expect(formatGrowthRate(0, 'ops')).toBe('+0 ops/sec/hr'); + }); + + it('formats percent growth rate with % suffix', () => { + expect(formatGrowthRate(0.5, 'percent')).toBe('+0.5%/hr'); + }); + + it('formats ratio growth rate with x suffix', () => { + expect(formatGrowthRate(0.1, 
'ratio')).toBe('+0.10x/hr'); + }); + + it('formats negative percent growth rate', () => { + expect(formatGrowthRate(-2.3, 'percent')).toBe('-2.3%/hr'); }); }); diff --git a/apps/web/src/components/pages/metric-forecasting/formatters.ts b/apps/web/src/components/pages/metric-forecasting/formatters.ts index 1e014c14..fdaa36b8 100644 --- a/apps/web/src/components/pages/metric-forecasting/formatters.ts +++ b/apps/web/src/components/pages/metric-forecasting/formatters.ts @@ -51,7 +51,7 @@ export function formatGrowthRate( formatter: 'bytes' | 'percent' | 'ratio' | 'ops', ): string { const sign = rate >= 0 ? '+' : '-'; - return `${sign}${formatNumber(Math.abs(rate), formatter)}/hr`; + return `${sign}${formatMetricValue(Math.abs(rate), formatter)}/hr`; } export function formatTime(timestamp: number): string { diff --git a/apps/web/src/hooks/usePolling.ts b/apps/web/src/hooks/usePolling.ts index 593ef199..811c3395 100644 --- a/apps/web/src/hooks/usePolling.ts +++ b/apps/web/src/hooks/usePolling.ts @@ -50,6 +50,10 @@ export function usePolling({ enabled, refetchInterval: interval, refetchIntervalInBackground: false, + retry: (failureCount, error) => { + if (error instanceof PaymentRequiredError) return false; + return failureCount < 1; + }, }); const refresh = useCallback(async () => { @@ -58,5 +62,5 @@ export function usePolling({ const lastUpdated = dataUpdatedAt ? new Date(dataUpdatedAt) : null; - return { data: data ?? null, error, loading: isLoading, lastUpdated, refresh }; + return { data: data ?? null, error, loading: enabled ? 
isLoading : false, lastUpdated, refresh }; } diff --git a/packages/shared/src/types/metric-forecasting.types.ts b/packages/shared/src/types/metric-forecasting.types.ts index c4d12beb..52c063ce 100644 --- a/packages/shared/src/types/metric-forecasting.types.ts +++ b/packages/shared/src/types/metric-forecasting.types.ts @@ -79,7 +79,7 @@ export const METRIC_KIND_META: Record = { }, }; -export const ALL_METRIC_KINDS: MetricKind[] = [ +export const ALL_METRIC_KINDS: readonly MetricKind[] = [ 'opsPerSec', 'usedMemory', 'cpuTotal', From f96816862bdf2399fc0f1c10c995e74e9d12d983 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Tue, 31 Mar 2026 16:54:33 +0300 Subject: [PATCH 13/20] chore: add parseInt radix and cpu extractor clarification comment --- .../pages/metric-forecasting/MetricSettingsPanel.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/web/src/components/pages/metric-forecasting/MetricSettingsPanel.tsx b/apps/web/src/components/pages/metric-forecasting/MetricSettingsPanel.tsx index 2a770964..547a0ab1 100644 --- a/apps/web/src/components/pages/metric-forecasting/MetricSettingsPanel.tsx +++ b/apps/web/src/components/pages/metric-forecasting/MetricSettingsPanel.tsx @@ -49,7 +49,7 @@ export function MetricSettingsPanel({ onUpdate({ alertThresholdMs: parseInt(e.target.value) })} + onChange={(e) => onUpdate({ alertThresholdMs: parseInt(e.target.value, 10) })} className="w-full px-3 py-2 border rounded-md" disabled={settings.ceiling === null} > From 36dfef9e0570f4684c65e603aec60ac1659f41d5 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Tue, 31 Mar 2026 17:08:53 +0300 Subject: [PATCH 14/20] chore: remove Prometheus Infinity sentinel, prune cache in non-Pro - Replace Infinity gauge with .remove() for stable/falling metrics - Add separate prune timer so cache cleanup works without Pro service --- .../metric-forecasting/metric-forecasting.service.ts | 7 +++++++ apps/api/src/prometheus/prometheus.service.ts | 11 ++++++++--- 2 files changed, 15 
insertions(+), 3 deletions(-) diff --git a/apps/api/src/metric-forecasting/metric-forecasting.service.ts b/apps/api/src/metric-forecasting/metric-forecasting.service.ts index a779abc6..e8f2b142 100644 --- a/apps/api/src/metric-forecasting/metric-forecasting.service.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.service.ts @@ -35,6 +35,7 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { private readonly logger = new Logger(MetricForecastingService.name); private forecastCache = new Map(); private alertInterval: ReturnType | null = null; + private pruneInterval: ReturnType | null = null; constructor( @Inject('STORAGE_CLIENT') private readonly storage: StoragePort, @@ -50,9 +51,15 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { this.logger.log('Enabling metric forecasting webhook alerts'); this.alertInterval = setInterval(() => this.checkAlerts(), ALERT_CHECK_INTERVAL_MS); } + // Prune stale cache entries even in non-Pro deployments + this.pruneInterval = setInterval(() => this.pruneCache(), CACHE_TTL_MS * 10); } onModuleDestroy(): void { + if (this.pruneInterval) { + clearInterval(this.pruneInterval); + this.pruneInterval = null; + } if (this.alertInterval) { clearInterval(this.alertInterval); this.alertInterval = null; diff --git a/apps/api/src/prometheus/prometheus.service.ts b/apps/api/src/prometheus/prometheus.service.ts index 7f7940df..683a6408 100644 --- a/apps/api/src/prometheus/prometheus.service.ts +++ b/apps/api/src/prometheus/prometheus.service.ts @@ -610,9 +610,14 @@ export class PrometheusService extends MultiConnectionPoller implements OnModule } if (!forecast.insufficientData) { - const value = - forecast.timeToLimitMs !== null ? 
forecast.timeToLimitMs / 1000 : Infinity; - this.metricForecastTimeToLimitSeconds.labels(connLabel, metricKind).set(value); + if (forecast.timeToLimitMs !== null) { + this.metricForecastTimeToLimitSeconds + .labels(connLabel, metricKind) + .set(forecast.timeToLimitMs / 1000); + } else { + // Stable/falling — remove label to avoid stale or sentinel values in Prometheus + this.metricForecastTimeToLimitSeconds.remove(connLabel, metricKind); + } } } catch (err) { this.logger.debug( From d39507da556d48c4b5d040b83233eec535d59c17 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Tue, 31 Mar 2026 23:44:17 +0300 Subject: [PATCH 15/20] chore: tighten dispatch interface types, fix usePolling memoization, flush saves on tab switch - Use WebhookEventType and MetricKind instead of string in IWebhookEventsProService.dispatchMetricForecastLimit - Memoize resolvedKey in usePolling to stabilize useCallback - Flush pending debounced save when switching forecast tabs instead of silently dropping it --- apps/web/src/hooks/usePolling.ts | 7 +++-- apps/web/src/pages/MetricForecasting.tsx | 33 +++++++++++++++++++----- packages/shared/src/webhooks/types.ts | 5 ++-- 3 files changed, 34 insertions(+), 11 deletions(-) diff --git a/apps/web/src/hooks/usePolling.ts b/apps/web/src/hooks/usePolling.ts index 811c3395..5bdc8644 100644 --- a/apps/web/src/hooks/usePolling.ts +++ b/apps/web/src/hooks/usePolling.ts @@ -1,5 +1,5 @@ import { useQuery, useQueryClient } from '@tanstack/react-query'; -import { useCallback, useRef } from 'react'; +import { useCallback, useMemo, useRef } from 'react'; import { PaymentRequiredError } from '../api/client'; import { useUpgradePrompt } from './useUpgradePrompt'; @@ -33,7 +33,10 @@ export function usePolling({ stableKeyRef.current = ++keyCounter; } - const resolvedKey = queryKey ?? ['polling', stableKeyRef.current, refetchKey]; + const resolvedKey = useMemo( + () => queryKey ?? 
['polling', stableKeyRef.current, refetchKey], + [queryKey, refetchKey], + ); const { data, error, isLoading, dataUpdatedAt } = useQuery({ queryKey: resolvedKey, diff --git a/apps/web/src/pages/MetricForecasting.tsx b/apps/web/src/pages/MetricForecasting.tsx index 765d4c02..fb267da2 100644 --- a/apps/web/src/pages/MetricForecasting.tsx +++ b/apps/web/src/pages/MetricForecasting.tsx @@ -50,11 +50,25 @@ export function MetricForecasting() { ? (tabParam as MetricKind) : 'opsPerSec'; + const pendingCallback = useRef<(() => Promise) | null>(null); + + const flushPendingSave = useCallback(() => { + if (debounceTimeout.current) { + clearTimeout(debounceTimeout.current); + debounceTimeout.current = undefined; + } + if (pendingCallback.current) { + pendingCallback.current(); + pendingCallback.current = null; + } + }, []); + const setActiveTab = useCallback( (tab: MetricKind) => { + flushPendingSave(); setSearchParams({ tab }, { replace: true }); }, - [setSearchParams], + [setSearchParams, flushPendingSave], ); const meta = METRIC_KIND_META[activeTab]; @@ -97,23 +111,28 @@ export function MetricForecasting() { (prev: typeof settings) => (prev ? 
{ ...prev, ...updates, updatedAt: Date.now() } : prev), ); - debounceTimeout.current = setTimeout(async () => { + const saveTab = activeTab; + const doSave = async () => { + pendingCallback.current = null; try { - const updated = await metricForecastingApi.updateSettings(activeTab, updates); - queryClient.setQueryData(['metric-forecast-settings', connectionId, activeTab], updated); + const updated = await metricForecastingApi.updateSettings(saveTab, updates); + queryClient.setQueryData(['metric-forecast-settings', connectionId, saveTab], updated); setSaveStatus('saved'); await queryClient.invalidateQueries({ - queryKey: ['metric-forecast', connectionId, activeTab], + queryKey: ['metric-forecast', connectionId, saveTab], }); if (saveTimeout.current) clearTimeout(saveTimeout.current); saveTimeout.current = setTimeout(() => setSaveStatus('idle'), 2000); } catch { await queryClient.invalidateQueries({ - queryKey: ['metric-forecast-settings', connectionId, activeTab], + queryKey: ['metric-forecast-settings', connectionId, saveTab], }); setSaveStatus('error'); } - }, 500); + }; + + pendingCallback.current = doSave; + debounceTimeout.current = setTimeout(doSave, 500); }; if (!forecast || !settings) return ; diff --git a/packages/shared/src/webhooks/types.ts b/packages/shared/src/webhooks/types.ts index 2c053080..0e44b768 100644 --- a/packages/shared/src/webhooks/types.ts +++ b/packages/shared/src/webhooks/types.ts @@ -55,6 +55,7 @@ export const ENTERPRISE_EVENTS: WebhookEventType[] = [ // ============================================================================ import { Tier } from '../license/types'; +import type { MetricKind } from '../types/metric-forecasting.types'; export { Tier }; /** @@ -314,8 +315,8 @@ export interface IWebhookEventsProService { }): Promise; dispatchMetricForecastLimit(data: { - event: string; - metricKind: string; + event: WebhookEventType; + metricKind: MetricKind; currentValue: number; ceiling: number | null; timeToLimitMs: number; From 
aee363f0e19b11f542f8b906405c99f158d89abc Mon Sep 17 00:00:00 2001 From: jamby77 Date: Wed, 1 Apr 2026 09:30:59 +0300 Subject: [PATCH 16/20] fix: add void operator to suppress ignored promise in MetricForecasting --- apps/web/src/pages/MetricForecasting.tsx | 2 +- .../{throughput-limit.spec.ts => metric-forecast-limit.spec.ts} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename proprietary/webhook-pro/__tests__/{throughput-limit.spec.ts => metric-forecast-limit.spec.ts} (100%) diff --git a/apps/web/src/pages/MetricForecasting.tsx b/apps/web/src/pages/MetricForecasting.tsx index fb267da2..f337051d 100644 --- a/apps/web/src/pages/MetricForecasting.tsx +++ b/apps/web/src/pages/MetricForecasting.tsx @@ -58,7 +58,7 @@ export function MetricForecasting() { debounceTimeout.current = undefined; } if (pendingCallback.current) { - pendingCallback.current(); + void pendingCallback.current(); pendingCallback.current = null; } }, []); diff --git a/proprietary/webhook-pro/__tests__/throughput-limit.spec.ts b/proprietary/webhook-pro/__tests__/metric-forecast-limit.spec.ts similarity index 100% rename from proprietary/webhook-pro/__tests__/throughput-limit.spec.ts rename to proprietary/webhook-pro/__tests__/metric-forecast-limit.spec.ts From 0281d63d9a235cb1b71651d9975b938a505d6d96 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Wed, 1 Apr 2026 09:41:33 +0300 Subject: [PATCH 17/20] bugfix: accumulate debounced updates, fix zero-start trend detection, use event param - Accumulate pending settings updates in a ref so rapid changes within the debounce window aren't dropped - Fix trend detection when predictedStart is zero (growing from idle) - Use data.event instead of hardcoded WebhookEventType in pro service - Rename throughput-limit.spec.ts to metric-forecast-limit.spec.ts --- .../src/metric-forecasting/metric-forecasting.service.ts | 6 +++++- apps/web/src/pages/MetricForecasting.tsx | 6 +++++- .../webhook-pro/__tests__/metric-forecast-limit.spec.ts | 6 +++--- 
proprietary/webhook-pro/webhook-events-pro.service.ts | 8 ++++---- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/apps/api/src/metric-forecasting/metric-forecasting.service.ts b/apps/api/src/metric-forecasting/metric-forecasting.service.ts index e8f2b142..6fbce529 100644 --- a/apps/api/src/metric-forecasting/metric-forecasting.service.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.service.ts @@ -123,7 +123,11 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { const currentValue = latestValue; const growthRate = slope * 3_600_000; // units per hour const growthPercent = - predictedStart !== 0 ? ((predictedEnd - predictedStart) / Math.abs(predictedStart)) * 100 : 0; + predictedStart !== 0 + ? ((predictedEnd - predictedStart) / Math.abs(predictedStart)) * 100 + : predictedEnd !== 0 + ? 100 // growing from zero — treat as significant rise + : 0; const trendDirection: 'rising' | 'falling' | 'stable' = growthPercent > TREND_THRESHOLD_PERCENT diff --git a/apps/web/src/pages/MetricForecasting.tsx b/apps/web/src/pages/MetricForecasting.tsx index f337051d..0b1aae9b 100644 --- a/apps/web/src/pages/MetricForecasting.tsx +++ b/apps/web/src/pages/MetricForecasting.tsx @@ -51,6 +51,7 @@ export function MetricForecasting() { : 'opsPerSec'; const pendingCallback = useRef<(() => Promise) | null>(null); + const pendingUpdates = useRef({}); const flushPendingSave = useCallback(() => { if (debounceTimeout.current) { @@ -105,6 +106,7 @@ export function MetricForecasting() { const updateSetting = (updates: MetricForecastSettingsUpdate) => { if (debounceTimeout.current) clearTimeout(debounceTimeout.current); + pendingUpdates.current = { ...pendingUpdates.current, ...updates }; queryClient.setQueryData( ['metric-forecast-settings', connectionId, activeTab], @@ -114,8 +116,10 @@ export function MetricForecasting() { const saveTab = activeTab; const doSave = async () => { pendingCallback.current = null; + const toSave = 
pendingUpdates.current; + pendingUpdates.current = {}; try { - const updated = await metricForecastingApi.updateSettings(saveTab, updates); + const updated = await metricForecastingApi.updateSettings(saveTab, toSave); queryClient.setQueryData(['metric-forecast-settings', connectionId, saveTab], updated); setSaveStatus('saved'); await queryClient.invalidateQueries({ diff --git a/proprietary/webhook-pro/__tests__/metric-forecast-limit.spec.ts b/proprietary/webhook-pro/__tests__/metric-forecast-limit.spec.ts index ba4e20a8..9a9d6c74 100644 --- a/proprietary/webhook-pro/__tests__/metric-forecast-limit.spec.ts +++ b/proprietary/webhook-pro/__tests__/metric-forecast-limit.spec.ts @@ -10,8 +10,8 @@ describe('WebhookEventsProService - dispatchMetricForecastLimit', () => { let licenseService: { getLicenseTier: jest.Mock }; const testData = { - event: 'metric_forecast.limit', - metricKind: 'opsPerSec', + event: WebhookEventType.METRIC_FORECAST_LIMIT, + metricKind: 'opsPerSec' as const, currentValue: 50_000, ceiling: 80_000, timeToLimitMs: 7_200_000, // 2 hours @@ -48,7 +48,7 @@ describe('WebhookEventsProService - dispatchMetricForecastLimit', () => { expect(webhookDispatcher.dispatchThresholdAlert).toHaveBeenCalledTimes(1); - const [eventType, alertKey, value, threshold, isAbove] = + const [eventType, _alertKey, value, threshold, isAbove] = webhookDispatcher.dispatchThresholdAlert.mock.calls[0]; expect(eventType).toBe(WebhookEventType.METRIC_FORECAST_LIMIT); diff --git a/proprietary/webhook-pro/webhook-events-pro.service.ts b/proprietary/webhook-pro/webhook-events-pro.service.ts index ccb93b84..706ab6a8 100644 --- a/proprietary/webhook-pro/webhook-events-pro.service.ts +++ b/proprietary/webhook-pro/webhook-events-pro.service.ts @@ -1,6 +1,6 @@ import { Injectable, Logger, OnModuleInit, Inject } from '@nestjs/common'; import { WebhookDispatcherService } from '@app/webhooks/webhook-dispatcher.service'; -import { WebhookEventType } from '@betterdb/shared'; +import { 
WebhookEventType, type MetricKind } from '@betterdb/shared'; import { LicenseService } from '@proprietary/licenses'; /** @@ -261,8 +261,8 @@ export class WebhookEventsProService implements OnModuleInit { * Called when projected time-to-limit drops below configured threshold */ async dispatchMetricForecastLimit(data: { - event: string; - metricKind: string; + event: WebhookEventType; + metricKind: MetricKind; currentValue: number; ceiling: number | null; timeToLimitMs: number; @@ -281,7 +281,7 @@ export class WebhookEventsProService implements OnModuleInit { const timeHours = (data.timeToLimitMs / 3_600_000).toFixed(1); await this.webhookDispatcher.dispatchThresholdAlert( - WebhookEventType.METRIC_FORECAST_LIMIT, + data.event, `metric_forecast_limit:${data.connectionId}:${data.metricKind}`, data.timeToLimitMs, data.threshold, From 98ccfad8e2552a630f342e9ef8be4e077dde4840 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Wed, 1 Apr 2026 09:52:17 +0300 Subject: [PATCH 18/20] bugfix: pass real dataPointCount in insufficient forecast, allow fractional ceilings, clamp trend line - buildInsufficientForecast now receives actual sorted.length instead of hardcoding 0 - Change ceiling @Min(1) to @Min(0.01) so memFragmentation ceiling of 1.0 is valid - Clamp trend line projection to Math.max(0) to prevent negative values --- .../dto/update-metric-forecast-settings.dto.ts | 2 +- .../metric-forecasting/metric-forecasting.service.ts | 11 ++++++++--- .../pages/metric-forecasting/MetricChart.tsx | 7 ++++--- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts b/apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts index ab012a76..8008aaf9 100644 --- a/apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts +++ b/apps/api/src/metric-forecasting/dto/update-metric-forecast-settings.dto.ts @@ -8,7 +8,7 @@ export class UpdateMetricForecastSettingsDto { @IsOptional() 
@ValidateIf((_obj, value) => value !== null) @IsNumber() - @Min(1) + @Min(0.01) ceiling?: number | null; @IsOptional() diff --git a/apps/api/src/metric-forecasting/metric-forecasting.service.ts b/apps/api/src/metric-forecasting/metric-forecasting.service.ts index 6fbce529..fce368be 100644 --- a/apps/api/src/metric-forecasting/metric-forecasting.service.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.service.ts @@ -104,11 +104,15 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { // Check sufficient data if (sorted.length < MIN_DATA_POINTS) { - return this.buildInsufficientForecast(connectionId, metricKind, settings, latestValue); + return this.buildInsufficientForecast( + connectionId, metricKind, settings, latestValue, sorted.length, + ); } const timeSpan = sorted[sorted.length - 1].timestamp - sorted[0].timestamp; if (timeSpan < MIN_TIME_SPAN_MS) { - return this.buildInsufficientForecast(connectionId, metricKind, settings, latestValue); + return this.buildInsufficientForecast( + connectionId, metricKind, settings, latestValue, sorted.length, + ); } // Linear regression on extracted metric @@ -326,6 +330,7 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { metricKind: MetricKind, settings: MetricForecastSettings, currentValue: number, + dataPointCount = 0, ): MetricForecast { return { connectionId, @@ -335,7 +340,7 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { growthRate: 0, growthPercent: 0, trendDirection: 'stable', - dataPointCount: 0, + dataPointCount, windowMs: settings.rollingWindowMs, ceiling: settings.ceiling, timeToLimitMs: null, diff --git a/apps/web/src/components/pages/metric-forecasting/MetricChart.tsx b/apps/web/src/components/pages/metric-forecasting/MetricChart.tsx index 88e4ae09..01540c8d 100644 --- a/apps/web/src/components/pages/metric-forecasting/MetricChart.tsx +++ 
b/apps/web/src/components/pages/metric-forecasting/MetricChart.tsx @@ -50,10 +50,11 @@ export function MetricChart({ const lastVal = chartData[chartData.length - 1].value; const intercept = lastVal - slopePerMs * lastTime; - trendData.push({ time: firstTime, trend: slopePerMs * firstTime + intercept, label: formatTime(firstTime) }); - trendData.push({ time: lastTime, trend: slopePerMs * lastTime + intercept, label: formatTime(lastTime) }); + const trendAt = (t: number) => Math.max(0, slopePerMs * t + intercept); + trendData.push({ time: firstTime, trend: trendAt(firstTime), label: formatTime(firstTime) }); + trendData.push({ time: lastTime, trend: trendAt(lastTime), label: formatTime(lastTime) }); if (endTime > lastTime) { - trendData.push({ time: endTime, trend: slopePerMs * endTime + intercept, label: formatTime(endTime) }); + trendData.push({ time: endTime, trend: trendAt(endTime), label: formatTime(endTime) }); } } From 9a33fca76b775ba2dacc0a55dbc471991584a892 Mon Sep 17 00:00:00 2001 From: jamby77 Date: Wed, 1 Apr 2026 10:06:18 +0300 Subject: [PATCH 19/20] chore: add re-entry guard on checkAlerts, use ENV_DEFAULT_ID constant - Prevent concurrent checkAlerts runs with a boolean guard - Export ENV_DEFAULT_ID from connection-registry and use it in the metric forecasting controller --- apps/api/src/connections/connection-registry.service.ts | 2 +- .../metric-forecasting/metric-forecasting.controller.ts | 7 ++++--- .../src/metric-forecasting/metric-forecasting.service.ts | 5 +++++ 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/apps/api/src/connections/connection-registry.service.ts b/apps/api/src/connections/connection-registry.service.ts index 3824bcdb..0787b223 100644 --- a/apps/api/src/connections/connection-registry.service.ts +++ b/apps/api/src/connections/connection-registry.service.ts @@ -10,7 +10,7 @@ import { RuntimeCapabilityTracker } from './runtime-capability-tracker.service'; import { UsageTelemetryService } from 
'../telemetry/usage-telemetry.service'; // TODO: Export and use across the codebase instead of hardcoded 'env-default' strings -const ENV_DEFAULT_ID = 'env-default'; +export const ENV_DEFAULT_ID = 'env-default'; @Injectable() export class ConnectionRegistry implements OnModuleInit, OnModuleDestroy { diff --git a/apps/api/src/metric-forecasting/metric-forecasting.controller.ts b/apps/api/src/metric-forecasting/metric-forecasting.controller.ts index 974af2ff..8c00b53a 100644 --- a/apps/api/src/metric-forecasting/metric-forecasting.controller.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.controller.ts @@ -3,6 +3,7 @@ import { MetricForecastingService } from './metric-forecasting.service'; import { MetricKindValidationPipe } from './pipes/metric-kind-validation.pipe'; import { UpdateMetricForecastSettingsDto } from './dto/update-metric-forecast-settings.dto'; import { ConnectionId } from '../common/decorators/connection-id.decorator'; +import { ENV_DEFAULT_ID } from '../connections/connection-registry.service'; import type { MetricForecast, MetricForecastSettings, @@ -18,7 +19,7 @@ export class MetricForecastingController { @Param('metricKind', MetricKindValidationPipe) metricKind: MetricKind, @ConnectionId() connectionId?: string, ): Promise { - return this.service.getForecast(connectionId || 'env-default', metricKind); + return this.service.getForecast(connectionId || ENV_DEFAULT_ID, metricKind); } @Get(':metricKind/settings') @@ -26,7 +27,7 @@ export class MetricForecastingController { @Param('metricKind', MetricKindValidationPipe) metricKind: MetricKind, @ConnectionId() connectionId?: string, ): Promise { - return this.service.getSettings(connectionId || 'env-default', metricKind); + return this.service.getSettings(connectionId || ENV_DEFAULT_ID, metricKind); } @Put(':metricKind/settings') @@ -35,6 +36,6 @@ export class MetricForecastingController { @ConnectionId() connectionId?: string, @Body() updates?: UpdateMetricForecastSettingsDto, ): Promise { 
- return this.service.updateSettings(connectionId || 'env-default', metricKind, updates || {}); + return this.service.updateSettings(connectionId || ENV_DEFAULT_ID, metricKind, updates || {}); } } diff --git a/apps/api/src/metric-forecasting/metric-forecasting.service.ts b/apps/api/src/metric-forecasting/metric-forecasting.service.ts index fce368be..6b08fd38 100644 --- a/apps/api/src/metric-forecasting/metric-forecasting.service.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.service.ts @@ -36,6 +36,7 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { private forecastCache = new Map(); private alertInterval: ReturnType | null = null; private pruneInterval: ReturnType | null = null; + private alertCheckRunning = false; constructor( @Inject('STORAGE_CLIENT') private readonly storage: StoragePort, @@ -367,6 +368,8 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { this.logger.warn('WebhookEventsProService not initialized'); return; } + if (this.alertCheckRunning) return; + this.alertCheckRunning = true; this.pruneCache(); @@ -406,6 +409,8 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { this.logger.error( `Alert check iteration failed: ${error instanceof Error ? 
error.message : 'Unknown error'}`, ); + } finally { + this.alertCheckRunning = false; } } } From 97cc8d3549229f41c0adbd9004f4100cbd090d67 Mon Sep 17 00:00:00 2001 From: Kristiyan Ivanov Date: Wed, 1 Apr 2026 10:59:48 +0300 Subject: [PATCH 20/20] fixed failing to save settings --- .../api/src/metric-forecasting/metric-forecasting.service.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/apps/api/src/metric-forecasting/metric-forecasting.service.ts b/apps/api/src/metric-forecasting/metric-forecasting.service.ts index fce368be..b096ad1a 100644 --- a/apps/api/src/metric-forecasting/metric-forecasting.service.ts +++ b/apps/api/src/metric-forecasting/metric-forecasting.service.ts @@ -218,9 +218,12 @@ export class MetricForecastingService implements OnModuleInit, OnModuleDestroy { updates: MetricForecastSettingsUpdate, ): Promise { const current = await this.getOrCreateSettings(connectionId, metricKind); + const defined = Object.fromEntries( + Object.entries(updates).filter(([, v]) => v !== undefined), + ); const merged: MetricForecastSettings = { ...current, - ...updates, + ...defined, connectionId, metricKind, updatedAt: Date.now(),