From 255a7abf988c0e124c0673b0ac3de4aef0dae110 Mon Sep 17 00:00:00 2001 From: Zach Boogher <129975920+AlrightLad@users.noreply.github.com> Date: Thu, 12 Feb 2026 23:05:15 -0500 Subject: [PATCH 1/9] Add Storage Growth Monitor script for Ninja RMM (Ticket 1123004) Implements the Storage Growth Tracking solution per TDD v2.5 (Final). This script runs daily via Ninja RMM on physical Hyper-V hosts and VMs, collecting drive metrics, calculating growth trends over a 60-day rolling window using linear regression, and reporting results to 16 Ninja custom fields. Critical trends trigger Windows Event Log entries (IDs 5001/5002) which Ninja monitors to auto-create Halo PSA tickets. Key capabilities: - Drive discovery via Win32_LogicalDisk with Win32_Volume filtering - OS drive auto-detection via Win32_OperatingSystem.SystemDrive - Exclusion of system partitions (Recovery, EFI, System Reserved, etc.) - OLS linear regression for GB/day growth rate and days-until-full - Status classification: Critical / Attention / Growing / Stable / Declining - Data drive priority ranking with top 3 reported to Ninja - Fire-once alerting with automatic reset when drives recover - JSON persistence with backup, corruption recovery, and 65-day pruning - Test mode auto-detection for local development without Ninja context - Verbose diagnostic mode (-Verbose) with regression detail output - Local log file with 90-day rotation --- rmm-ninja/ServerGrowthTracking | 1217 ++++++++++++++++++++++++++++++++ 1 file changed, 1217 insertions(+) create mode 100644 rmm-ninja/ServerGrowthTracking diff --git a/rmm-ninja/ServerGrowthTracking b/rmm-ninja/ServerGrowthTracking new file mode 100644 index 0000000..08a4b61 --- /dev/null +++ b/rmm-ninja/ServerGrowthTracking @@ -0,0 +1,1217 @@ +<# +.SYNOPSIS + Storage Growth Monitor - Tracks server storage growth trends over 60 days + and reports to Ninja RMM custom fields. 
+ +.DESCRIPTION + Collects daily storage metrics from servers (physical Hyper-V hosts and VMs), + maintains a 60-day rolling history, calculates growth trends using linear + regression, and updates Ninja RMM custom fields with actionable insights. + Critical trends trigger Ninja event log entries for automated alerting and + Halo PSA ticket creation. + + Technical Design Document: Ticket 1123004 - DTC Internal + Version: 2.5 (Final) + +.PARAMETER Verbose + Enable detailed diagnostic output for troubleshooting. + +.EXAMPLE + .\Storage-Growth-Monitor.ps1 + Run in standard mode (Ninja or Test mode auto-detected). + +.EXAMPLE + .\Storage-Growth-Monitor.ps1 -Verbose + Run with verbose diagnostic output. +#> + +[CmdletBinding()] +param() + +# ============================================================================ +# CONSTANTS +# ============================================================================ +$Script:VERSION = "1.0" +$Script:STORAGE_PATH = "C:\ProgramData\NinjaRMM\StorageMetrics" +$Script:HISTORY_FILE = Join-Path $Script:STORAGE_PATH "storage_history.json" +$Script:BACKUP_FILE = Join-Path $Script:STORAGE_PATH "storage_history.json.bak" +$Script:LOG_FILE = Join-Path $Script:STORAGE_PATH "storage_monitor.log" +$Script:EVENT_SOURCE = "StorageGrowthMonitor" +$Script:RETENTION_DAYS = 65 +$Script:LOG_RETENTION_DAYS = 90 +$Script:OFFLINE_REMOVAL_DAYS = 30 +$Script:MIN_DATA_POINTS = 7 +$Script:FULL_CONFIDENCE_POINTS = 30 +$Script:MAX_DATA_DRIVES = 3 +$Script:DAYS_CAP = 1825 +$Script:MIN_DRIVE_SIZE_GB = 1 +$Script:CRITICAL_DAYS = 30 +$Script:ATTENTION_DAYS_LOW = 30 +$Script:ATTENTION_DAYS_HIGH = 90 +$Script:CRITICAL_USAGE_PERCENT = 95 +$Script:GROWING_THRESHOLD_GB_DAY = 0.1 +$Script:EXCLUDED_LABELS = @("Recovery", "EFI", "System Reserved", "SYSTEM", "Windows RE") +$Script:EXCLUDED_FILESYSTEMS = @("FAT", "FAT32", "RAW") +$Script:JSON_VERSION = "1.0" + +# ============================================================================ +# LOGGING +# 
============================================================================ +$Script:LogBuffer = [System.Collections.ArrayList]::new() + +function Get-TimestampString { + $now = Get-Date + $tz = [System.TimeZoneInfo]::Local + $tzAbbr = if ($now.IsDaylightSavingTime()) { + $dn = $tz.DaylightName + ($dn -split '\s' | ForEach-Object { $_[0] }) -join '' + } else { + $sn = $tz.StandardName + ($sn -split '\s' | ForEach-Object { $_[0] }) -join '' + } + return "[{0} {1}]" -f ($now.ToString("yyyy-MM-dd HH:mm:ss")), $tzAbbr +} + +function Write-Log { + param( + [string]$Message, + [switch]$IsVerbose + ) + if ($IsVerbose -and $VerbosePreference -ne 'Continue') { return } + + $ts = Get-TimestampString + $line = if ($Message -eq '') { + "$ts " + } elseif ($IsVerbose) { + "$ts [VERBOSE] $Message" + } else { + "$ts $Message" + } + + Write-Host $line + [void]$Script:LogBuffer.Add($line) +} + +function Write-VerboseLog { + param([string]$Message) + if ($VerbosePreference -eq 'Continue') { + Write-Log -Message $Message -IsVerbose + } +} + +# ============================================================================ +# LOG FILE MANAGEMENT (Section 14) +# ============================================================================ +function Save-LogFile { + try { + $existingLines = @() + if (Test-Path $Script:LOG_FILE) { + $existingLines = @(Get-Content -Path $Script:LOG_FILE -ErrorAction SilentlyContinue) + } + + # Prune entries older than 90 days + $cutoff = (Get-Date).AddDays(-$Script:LOG_RETENTION_DAYS) + $prunedLines = [System.Collections.ArrayList]::new() + $removedCount = 0 + + foreach ($line in $existingLines) { + if ($line -match '^\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})') { + $parsedDate = $null + if ([DateTime]::TryParseExact($Matches[1], "yyyy-MM-dd HH:mm:ss", $null, [System.Globalization.DateTimeStyles]::None, [ref]$parsedDate)) { + if ($parsedDate -lt $cutoff) { + $removedCount++ + continue + } + } + } + # Keep line if timestamp unparseable or within retention + 
[void]$prunedLines.Add($line) + } + + if ($removedCount -gt 0) { + Write-VerboseLog "Log file pruning: $removedCount entries removed (older than $($Script:LOG_RETENTION_DAYS) days)" + } + + # Append new entries + foreach ($line in $Script:LogBuffer) { + [void]$prunedLines.Add($line) + } + + $prunedLines | Set-Content -Path $Script:LOG_FILE -Encoding UTF8 -ErrorAction Stop + } + catch { + Write-Host "$(Get-TimestampString) ERROR: Failed to write log file: $_" + } +} + +# ============================================================================ +# JSON PERSISTENCE (Section 6) +# ============================================================================ +function New-EmptyHistory { + return [ordered]@{ + version = $Script:JSON_VERSION + deviceId = $env:COMPUTERNAME + lastUpdated = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") + excessDriveAlertSent = $false + drives = [ordered]@{} + } +} + +function Load-History { + $maxRetries = 3 + $retryDelay = 5 + + if (-not (Test-Path $Script:HISTORY_FILE)) { + Write-VerboseLog "Existing JSON: No - creating new history" + return New-EmptyHistory + } + + $fileInfo = Get-Item $Script:HISTORY_FILE -ErrorAction SilentlyContinue + Write-VerboseLog "Existing JSON: Yes ($([math]::Round($fileInfo.Length / 1KB)) KB)" + + for ($attempt = 1; $attempt -le $maxRetries; $attempt++) { + try { + $content = Get-Content -Path $Script:HISTORY_FILE -Raw -Encoding UTF8 -ErrorAction Stop + $data = $content | ConvertFrom-Json -ErrorAction Stop + + # Validate JSON structure + if (-not $data.version -or -not $data.drives) { + throw "Invalid JSON structure - missing version or drives" + } + + # Convert PSCustomObject to ordered hashtable for manipulation + $history = [ordered]@{ + version = $data.version + deviceId = if ($data.deviceId) { $data.deviceId } else { $env:COMPUTERNAME } + lastUpdated = if ($data.lastUpdated) { $data.lastUpdated } else { (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") } + excessDriveAlertSent = if ($null -ne 
$data.excessDriveAlertSent) { [bool]$data.excessDriveAlertSent } else { $false } + drives = [ordered]@{} + } + + foreach ($prop in $data.drives.PSObject.Properties) { + $driveLetter = $prop.Name + $driveData = $prop.Value + + $historyEntries = [System.Collections.ArrayList]::new() + if ($driveData.history) { + foreach ($entry in $driveData.history) { + [void]$historyEntries.Add([ordered]@{ + timestamp = $entry.timestamp + usedGB = [double]$entry.usedGB + freeGB = [double]$entry.freeGB + usagePercent = [double]$entry.usagePercent + }) + } + } + + $history.drives[$driveLetter] = [ordered]@{ + volumeLabel = if ($driveData.volumeLabel) { $driveData.volumeLabel } else { "" } + totalSizeGB = [double]$driveData.totalSizeGB + driveType = if ($driveData.driveType) { $driveData.driveType } else { "Data" } + alertSent = if ($null -ne $driveData.alertSent) { [bool]$driveData.alertSent } else { $false } + status = if ($driveData.status) { $driveData.status } else { "Online" } + lastSeen = if ($driveData.lastSeen) { $driveData.lastSeen } else { (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") } + history = $historyEntries + } + } + + return $history + } + catch { + if ($attempt -lt $maxRetries) { + Write-Log "WARNING: Failed to load history (attempt $attempt/$maxRetries), retrying in ${retryDelay}s..." + Start-Sleep -Seconds $retryDelay + } + else { + Write-Log "WARNING: History file corrupted after $maxRetries attempts. Renaming and starting fresh." 
+ $corruptedPath = $Script:HISTORY_FILE + ".corrupted" + try { + Move-Item -Path $Script:HISTORY_FILE -Destination $corruptedPath -Force -ErrorAction Stop + } + catch { + Write-Log "WARNING: Could not rename corrupted file: $_" + } + return New-EmptyHistory + } + } + } +} + +function Save-History { + param([hashtable]$History) + + # Backup existing file + if (Test-Path $Script:HISTORY_FILE) { + try { + Copy-Item -Path $Script:HISTORY_FILE -Destination $Script:BACKUP_FILE -Force -ErrorAction Stop + } + catch { + Write-Log "WARNING: Could not create backup: $_" + } + } + + # Update lastUpdated + $History.lastUpdated = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") + + try { + $History | ConvertTo-Json -Depth 10 | Set-Content -Path $Script:HISTORY_FILE -Encoding UTF8 -ErrorAction Stop + + # Count total data points + $totalPoints = 0 + $driveCount = 0 + foreach ($drive in $History.drives.Values) { + $driveCount++ + $totalPoints += $drive.history.Count + } + Write-Log ([char]0x2713 + " History file saved ($driveCount drives, $totalPoints data points)") + } + catch { + Write-Log "ERROR: Failed to save history file: $_" + } +} + +# ============================================================================ +# DRIVE DISCOVERY & FILTERING (Section 5) +# ============================================================================ +function Get-FilteredDrives { + # Detect OS drive (Section 5.3) + $osDriveLetter = (Get-CimInstance Win32_OperatingSystem -ErrorAction SilentlyContinue).SystemDrive + if (-not $osDriveLetter) { + Write-Log "WARNING: Could not detect OS drive, falling back to C:" + $osDriveLetter = "C:" + } + Write-VerboseLog "OS Drive detected: $osDriveLetter" + + # Primary source - Win32_LogicalDisk (Section 5.1) + $logicalDisks = @(Get-CimInstance -ClassName Win32_LogicalDisk -Filter "DriveType=3" -ErrorAction Stop) + Write-VerboseLog "Drive Discovery: $($logicalDisks.Count) drives found" + + # Secondary source - Win32_Volume for filtering metadata only (Section 
5.1) + $volumes = $null + try { + $volumes = @(Get-CimInstance -ClassName Win32_Volume -ErrorAction Stop) + Write-VerboseLog "Win32_Volume query: Success" + } + catch { + Write-Log "WARNING: Win32_Volume query failed - continuing with LogicalDisk only" + Write-VerboseLog "Win32_Volume query: Failed - $_" + } + + $filteredDrives = [System.Collections.ArrayList]::new() + + foreach ($disk in $logicalDisks) { + $letter = $disk.DeviceID # e.g., "C:" + $sizeGB = [math]::Round($disk.Size / 1GB, 3) + $label = $disk.VolumeName + + # Find matching volume for additional metadata + $matchingVolume = $null + if ($volumes) { + $matchingVolume = $volumes | Where-Object { + $_.DriveLetter -eq $letter + } | Select-Object -First 1 + } + + $fileSystem = if ($matchingVolume -and $matchingVolume.FileSystem) { + $matchingVolume.FileSystem + } else { "" } + + # Use volume label from Volume if LogicalDisk doesn't have one + if (-not $label -and $matchingVolume -and $matchingVolume.Label) { + $label = $matchingVolume.Label + } + + # --- Exclusion checks (Section 5.2) --- + + # Size < 1 GB + if ($sizeGB -lt $Script:MIN_DRIVE_SIZE_GB) { + Write-VerboseLog " $letter DriveType=3 Size=${sizeGB}GB - EXCLUDED (Size < 1GB)" + continue + } + + # Excluded volume labels + $labelExcluded = $false + if ($label) { + foreach ($excludedLabel in $Script:EXCLUDED_LABELS) { + if ($label -ieq $excludedLabel) { + Write-VerboseLog " $letter DriveType=3 Size=${sizeGB}GB Label=`"$label`" - EXCLUDED ($excludedLabel partition)" + $labelExcluded = $true + break + } + } + } + if ($labelExcluded) { continue } + + # Excluded file systems (FAT, FAT32, RAW) + if ($fileSystem -and $Script:EXCLUDED_FILESYSTEMS -contains $fileSystem) { + Write-VerboseLog " $letter DriveType=3 Size=${sizeGB}GB - EXCLUDED (FileSystem: $fileSystem)" + continue + } + + # Drive passed all filters - classify and collect metrics + $isOS = ($letter -eq $osDriveLetter) + $driveType = if ($isOS) { "OS" } else { "Data" } + + Write-VerboseLog " $letter 
DriveType=3 Size=${sizeGB}GB - INCLUDED ($driveType)" + + $usedBytes = $disk.Size - $disk.FreeSpace + $usedGB = [math]::Round($usedBytes / 1GB, 3) + $freeGB = [math]::Round($disk.FreeSpace / 1GB, 3) + $usagePercent = if ($disk.Size -gt 0) { + [math]::Round(($usedBytes / $disk.Size) * 100, 2) + } else { 0 } + + Write-VerboseLog "Drive $letter Raw values - Total: $($sizeGB.ToString('F3')) Used: $($usedGB.ToString('F3')) Free: $($freeGB.ToString('F3')) Percent: $($usagePercent.ToString('F3'))%" + + [void]$filteredDrives.Add(@{ + Letter = $letter + VolumeLabel = if ($label) { $label } else { "" } + TotalSizeGB = $sizeGB + UsedGB = $usedGB + FreeGB = $freeGB + UsagePercent = $usagePercent + DriveType = $driveType + IsOS = $isOS + }) + } + + return $filteredDrives +} + +# ============================================================================ +# HISTORY UPDATE & PRUNING (Section 6) +# ============================================================================ +function Update-History { + param( + [hashtable]$History, + [array]$CurrentDrives + ) + + $now = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") + $cutoffDate = (Get-Date).AddDays(-$Script:RETENTION_DAYS) + $visibleLetters = @($CurrentDrives | ForEach-Object { $_.Letter }) + + # Update/add visible drives + foreach ($drive in $CurrentDrives) { + $letter = $drive.Letter + + if (-not $History.drives.ContainsKey($letter)) { + # New drive - create entry + $History.drives[$letter] = [ordered]@{ + volumeLabel = $drive.VolumeLabel + totalSizeGB = $drive.TotalSizeGB + driveType = $drive.DriveType + alertSent = $false + status = "Online" + lastSeen = $now + history = [System.Collections.ArrayList]::new() + } + } + else { + # Existing drive - update metadata + $existingDrive = $History.drives[$letter] + $existingDrive.status = "Online" + $existingDrive.lastSeen = $now + $existingDrive.driveType = $drive.DriveType + $existingDrive.volumeLabel = $drive.VolumeLabel + + # Detect disk resize (Section 5.5) + if 
($existingDrive.totalSizeGB -ne $drive.TotalSizeGB) { + Write-Log "Drive ${letter}: disk size changed from $($existingDrive.totalSizeGB) GB to $($drive.TotalSizeGB) GB" + Write-VerboseLog "Drive ${letter}: Size change detected - Old: $($existingDrive.totalSizeGB) GB, New: $($drive.TotalSizeGB) GB" + $existingDrive.totalSizeGB = $drive.TotalSizeGB + } + } + + # Append new data point + [void]$History.drives[$letter].history.Add([ordered]@{ + timestamp = $now + usedGB = $drive.UsedGB + freeGB = $drive.FreeGB + usagePercent = $drive.UsagePercent + }) + + # Prune entries older than 65-day retention window + $driveHistory = $History.drives[$letter].history + $beforeCount = $driveHistory.Count + $prunedHistory = [System.Collections.ArrayList]::new() + foreach ($entry in $driveHistory) { + $entryDate = [DateTime]::Parse($entry.timestamp) + if ($entryDate -ge $cutoffDate) { + [void]$prunedHistory.Add($entry) + } + } + $History.drives[$letter].history = $prunedHistory + $afterCount = $prunedHistory.Count + + if ($beforeCount -ne $afterCount) { + Write-VerboseLog "Drive ${letter}: Pruned $($beforeCount - $afterCount) entries older than $($Script:RETENTION_DAYS) days" + } + + if ($prunedHistory.Count -gt 0) { + $oldest = $prunedHistory[0].timestamp.Substring(0, 10) + $newest = $prunedHistory[$prunedHistory.Count - 1].timestamp.Substring(0, 10) + Write-VerboseLog "Drive ${letter}: History - $($prunedHistory.Count) points loaded, $afterCount after pruning (oldest: $oldest, newest: $newest)" + } + } + + # Handle drives in history that are NOT currently visible (Section 7) + $drivesToRemove = [System.Collections.ArrayList]::new() + + foreach ($letter in @($History.drives.Keys)) { + if ($letter -notin $visibleLetters) { + $driveData = $History.drives[$letter] + + if ($driveData.status -ne "Offline") { + Write-VerboseLog "Drive ${letter}: No longer visible - marking Offline" + $driveData.status = "Offline" + } + + # Remove if offline > 30 days (Section 7.1) + if 
($driveData.lastSeen) { + $lastSeenDate = [DateTime]::Parse($driveData.lastSeen) + $daysOffline = ((Get-Date) - $lastSeenDate).TotalDays + if ($daysOffline -gt $Script:OFFLINE_REMOVAL_DAYS) { + Write-Log "Drive ${letter}: Offline for $([math]::Round($daysOffline, 0)) days - removing from history" + [void]$drivesToRemove.Add($letter) + } + } + } + } + + foreach ($letter in $drivesToRemove) { + $History.drives.Remove($letter) + } + + return $History +} + +# ============================================================================ +# LINEAR REGRESSION (Section 8) +# ============================================================================ +function Get-LinearRegression { + param( + [System.Collections.ArrayList]$HistoryData + ) + + $n = $HistoryData.Count + if ($n -lt 2) { + return @{ Slope = 0; Intercept = 0; RSquared = 0 } + } + + # Convert timestamps to days from first measurement (Section 8.1) + $firstTimestamp = [DateTime]::Parse($HistoryData[0].timestamp) + + $sumX = 0.0 + $sumY = 0.0 + $sumXY = 0.0 + $sumX2 = 0.0 + $sumY2 = 0.0 + + foreach ($point in $HistoryData) { + $x = ([DateTime]::Parse($point.timestamp) - $firstTimestamp).TotalDays + $y = [double]$point.usedGB + + $sumX += $x + $sumY += $y + $sumXY += ($x * $y) + $sumX2 += ($x * $x) + $sumY2 += ($y * $y) + } + + # OLS formula (Section 8.3) + $denominator = ($n * $sumX2) - ($sumX * $sumX) + if ([math]::Abs($denominator) -lt 1e-10) { + return @{ Slope = 0; Intercept = $sumY / $n; RSquared = 0 } + } + + $slope = (($n * $sumXY) - ($sumX * $sumY)) / $denominator + $intercept = ($sumY - ($slope * $sumX)) / $n + + # Calculate R-squared for trend confidence + $meanY = $sumY / $n + $ssTot = $sumY2 - ($n * $meanY * $meanY) + $ssRes = 0.0 + foreach ($point in $HistoryData) { + $x = ([DateTime]::Parse($point.timestamp) - $firstTimestamp).TotalDays + $y = [double]$point.usedGB + $predicted = $slope * $x + $intercept + $ssRes += ($y - $predicted) * ($y - $predicted) + } + $rSquared = if ($ssTot -gt 0) { 1 - 
($ssRes / $ssTot) } else { 0 } + + return @{ + Slope = $slope # GB per day + Intercept = $intercept + RSquared = [math]::Round($rSquared, 2) + } +} + +# ============================================================================ +# TREND CALCULATION & STATUS CLASSIFICATION (Sections 8, 9) +# ============================================================================ +function Get-DriveAnalysis { + param( + [hashtable]$DriveData, + [string]$DriveLetter + ) + + $result = @{ + Letter = $DriveLetter + VolumeLabel = $DriveData.volumeLabel + TotalSizeGB = $DriveData.totalSizeGB + DriveType = $DriveData.driveType + Status = "" + GBPerMonth = "" + DaysUntilFull = "" + AlertSent = $DriveData.alertSent + DriveStatus = $DriveData.status + IsLimited = $false + NumericDays = $null + RawGrowthPerDay = 0 + CurrentUsedGB = 0 + CurrentFreeGB = 0 + CurrentPercent = 0 + } + + # Handle offline drives (Section 7.2) + if ($DriveData.status -eq "Offline") { + $result.Status = "Offline" + $result.GBPerMonth = "OFFLINE" + $result.DaysUntilFull = "OFFLINE" + return $result + } + + $pointCount = $DriveData.history.Count + + # Populate current metrics from latest data point if available + if ($pointCount -gt 0) { + $latest = $DriveData.history[$pointCount - 1] + $result.CurrentUsedGB = [double]$latest.usedGB + $result.CurrentFreeGB = [double]$latest.freeGB + $result.CurrentPercent = [double]$latest.usagePercent + } + + # Check minimum data points (Section 8.2) + if ($pointCount -lt $Script:MIN_DATA_POINTS) { + $result.Status = "Insufficient Data" + $result.GBPerMonth = "Insufficient Data" + $result.DaysUntilFull = "Insufficient Data" + Write-VerboseLog "Drive ${DriveLetter}: $pointCount data points - Insufficient Data" + return $result + } + + $isLimited = $pointCount -lt $Script:FULL_CONFIDENCE_POINTS + $result.IsLimited = $isLimited + + # Run linear regression (Section 8.1) + $regression = Get-LinearRegression -HistoryData $DriveData.history + $dailyGrowth = $regression.Slope + 
$monthlyGrowth = [math]::Round($dailyGrowth * 30, 3) + $result.RawGrowthPerDay = $dailyGrowth + + Write-VerboseLog "Drive ${DriveLetter}: Regression - slope=$([math]::Round($dailyGrowth, 4)) GB/day, R`u{00B2}=$($regression.RSquared)" + + $result.GBPerMonth = $monthlyGrowth.ToString("F3") + + # Get current free space for days-until-full calculation + $currentFreeGB = $result.CurrentFreeGB + $currentUsagePercent = $result.CurrentPercent + + # Calculate days until full (Section 8.4, 9.1) + if ($dailyGrowth -le 0) { + if ($dailyGrowth -eq 0) { + $result.DaysUntilFull = "No Growth" + } else { + $result.DaysUntilFull = "Declining" + $result.GBPerMonth = $monthlyGrowth.ToString("F3") + } + } else { + $daysUntilFull = $currentFreeGB / $dailyGrowth + if ($daysUntilFull -gt $Script:DAYS_CAP) { + $daysUntilFull = $Script:DAYS_CAP + } + $daysUntilFull = [math]::Round($daysUntilFull, 2) + $result.DaysUntilFull = $daysUntilFull.ToString("F2") + $result.NumericDays = $daysUntilFull + } + + # Status classification - first match wins (Section 9.2) + # Priority 3: Critical - Days < 30 OR usage > 95% + $isCritical = $false + if ($currentUsagePercent -gt $Script:CRITICAL_USAGE_PERCENT) { + $isCritical = $true + } + if ($null -ne $result.NumericDays -and $result.NumericDays -lt $Script:CRITICAL_DAYS) { + $isCritical = $true + } + + if ($isCritical) { + $result.Status = "Critical" + } + # Priority 4: Attention - Days 30-90 inclusive + elseif ($null -ne $result.NumericDays -and $result.NumericDays -ge $Script:ATTENTION_DAYS_LOW -and $result.NumericDays -le $Script:ATTENTION_DAYS_HIGH) { + $result.Status = "Attention" + } + # Priority 5: Declining - negative growth rate + elseif ($dailyGrowth -lt 0) { + $result.Status = "Declining" + } + # Priority 6: Growing - >= 0.1 GB/day AND days > 90 + elseif ($dailyGrowth -ge $Script:GROWING_THRESHOLD_GB_DAY -and ($null -eq $result.NumericDays -or $result.NumericDays -gt $Script:ATTENTION_DAYS_HIGH)) { + $result.Status = "Growing" + } + # Priority 
7: Stable - < 0.1 GB/day OR zero growth + else { + $result.Status = "Stable" + } + + # Append (Limited) indicator if 7-30 data points (Section 9.4) + if ($isLimited) { + $result.Status = "$($result.Status) (Limited)" + } + + Write-VerboseLog "Drive ${DriveLetter}: Data point count: $pointCount, Status: $($result.Status)" + + return $result +} + +# ============================================================================ +# PRIORITY RANKING (Section 9.5) +# ============================================================================ +function Get-StatusSortPriority { + param([string]$Status) + $baseStatus = $Status -replace '\s*\(Limited\)', '' + switch ($baseStatus) { + "Critical" { return 1 } + "Attention" { return 2 } + "Growing" { return 3 } + "Stable" { return 4 } + "Declining" { return 5 } + "Insufficient Data" { return 6 } + "Offline" { return 7 } + default { return 8 } + } +} + +function Get-ServerStatusSeverity { + param([string]$Status) + # Section 9.7: Critical > Attention > Growing > Stable > Declining > Insufficient Data + $baseStatus = $Status -replace '\s*\(Limited\)', '' + switch ($baseStatus) { + "Critical" { return 6 } + "Attention" { return 5 } + "Growing" { return 4 } + "Stable" { return 3 } + "Declining" { return 2 } + "Insufficient Data" { return 1 } + default { return 0 } + } +} + +function Sort-DataDrives { + param([array]$Analyses) + + # Separate online and offline (Section 9.5) + $online = @($Analyses | Where-Object { $_.DriveStatus -ne "Offline" }) + $offline = @($Analyses | Where-Object { $_.DriveStatus -eq "Offline" }) + + # Sort online drives by: status priority, then numeric days-until-full, then letter + $sorted = @($online | Sort-Object -Property @( + @{ Expression = { Get-StatusSortPriority $_.Status }; Ascending = $true }, + @{ Expression = { + if ($null -ne $_.NumericDays) { $_.NumericDays } + else { [double]::MaxValue } + }; Ascending = $true }, + @{ Expression = { $_.Letter }; Ascending = $true } + )) + + # Append offline 
sorted alphabetically + $offlineSorted = @($offline | Sort-Object -Property Letter) + + $result = @() + if ($sorted.Count -gt 0) { $result += $sorted } + if ($offlineSorted.Count -gt 0) { $result += $offlineSorted } + + return $result +} + +# ============================================================================ +# ALERTING (Section 11) +# ============================================================================ +function Initialize-EventSource { + try { + if (-not [System.Diagnostics.EventLog]::SourceExists($Script:EVENT_SOURCE)) { + New-EventLog -LogName Application -Source $Script:EVENT_SOURCE -ErrorAction Stop + Write-VerboseLog "Event log source '$($Script:EVENT_SOURCE)' registered" + } + } + catch { + Write-Log "WARNING: Could not register event log source: $_" + Write-Log "WARNING: Event log writing will be skipped" + return $false + } + return $true +} + +function Write-CriticalAlert { + param( + [array]$CriticalDrives, + [string]$Hostname + ) + + if ($CriticalDrives.Count -eq 0) { return } + + # Section 11.4 message formats + if ($CriticalDrives.Count -eq 1) { + $d = $CriticalDrives[0] + $daysText = if ($d.DaysUntilFull -match '^\d') { "$($d.DaysUntilFull) days until full" } else { $d.DaysUntilFull } + $message = "STORAGE CRITICAL: Server $Hostname - Drive $($d.Letter) has $daysText ($($d.GBPerMonth) GB/month growth rate). Immediate attention required." + } + else { + $lines = "STORAGE CRITICAL: Server $Hostname - Multiple drives require attention:`r`n" + foreach ($d in $CriticalDrives) { + $daysText = if ($d.DaysUntilFull -match '^\d') { "$($d.DaysUntilFull) days until full" } else { $d.DaysUntilFull } + $lines += "- Drive $($d.Letter): $daysText ($($d.GBPerMonth) GB/month)`r`n" + } + $message = $lines + "Immediate attention required." + } + + try { + Write-EventLog -LogName Application -Source $Script:EVENT_SOURCE -EventId 5001 -EntryType Warning -Message $message -ErrorAction Stop + Write-Log "! 
Critical alert written to Event Log (ID 5001)" + } + catch { + Write-Log "ERROR: Failed to write Event 5001: $_" + } +} + +function Write-ExcessDriveAlert { + param( + [int]$DriveCount, + [array]$ExcludedDrives, + [string]$Hostname + ) + + $excludedList = ($ExcludedDrives | ForEach-Object { $_.Letter }) -join ", " + $message = "STORAGE MONITORING: Server $Hostname has $DriveCount data drives but only 3 can be reported. Excluded drives: $excludedList. Script update may be required." + + try { + Write-EventLog -LogName Application -Source $Script:EVENT_SOURCE -EventId 5002 -EntryType Warning -Message $message -ErrorAction Stop + Write-Log "! Excess drive alert written to Event Log (ID 5002)" + } + catch { + Write-Log "ERROR: Failed to write Event 5002: $_" + } +} + +# ============================================================================ +# NINJA RMM INTEGRATION (Section 10) +# ============================================================================ +function Update-NinjaFields { + param( + [string]$ServerStatus, + [hashtable]$OSAnalysis, + [array]$DataDriveSlots + ) + + $runningInNinja = $null -ne (Get-Command "Ninja-Property-Set" -ErrorAction SilentlyContinue) + if (-not $runningInNinja) { return $false } + + try { + # Overall Status (Section 10.1) + Ninja-Property-Set "Server Storage Status" $ServerStatus + + # OS Drive fields + if ($OSAnalysis) { + Ninja-Property-Set "OS Drive Status" $OSAnalysis.Status + Ninja-Property-Set "OS Drive GB per Month" $OSAnalysis.GBPerMonth + Ninja-Property-Set "OS Drive Days Until Full" $OSAnalysis.DaysUntilFull + } + else { + Ninja-Property-Set "OS Drive Status" "NO DRIVE" + Ninja-Property-Set "OS Drive GB per Month" "NO DRIVE" + Ninja-Property-Set "OS Drive Days Until Full" "NO DRIVE" + } + + # Data Drive 1-3 fields (Section 10.4) + for ($i = 0; $i -lt $Script:MAX_DATA_DRIVES; $i++) { + $slotNum = $i + 1 + $slot = if ($i -lt $DataDriveSlots.Count) { $DataDriveSlots[$i] } else { $null } + + if ($null -ne $slot -and 
$slot.Status -ne "NO DRIVE") { + # Letter display: strip colon for Ninja, add (OFFLINE) if offline + $letterDisplay = if ($slot.DriveStatus -eq "Offline") { + "$($slot.Letter -replace ':$', '') (OFFLINE)" + } else { + $slot.Letter -replace ':$', '' + } + + Ninja-Property-Set "Data Drive $slotNum Letter" $letterDisplay + Ninja-Property-Set "Data Drive $slotNum Status" $slot.Status + Ninja-Property-Set "Data Drive $slotNum GB per Month" $slot.GBPerMonth + Ninja-Property-Set "Data Drive $slotNum Days Until Full" $slot.DaysUntilFull + } + else { + # Empty slot (Section 10.2) + Ninja-Property-Set "Data Drive $slotNum Letter" "NO DRIVE" + Ninja-Property-Set "Data Drive $slotNum Status" "NO DRIVE" + Ninja-Property-Set "Data Drive $slotNum GB per Month" "NO DRIVE" + Ninja-Property-Set "Data Drive $slotNum Days Until Full" "NO DRIVE" + } + } + + return $true + } + catch { + Write-Log "ERROR: Failed to update Ninja fields: $_" + return $false + } +} + +# ============================================================================ +# CONSOLE OUTPUT (Section 19) +# ============================================================================ +function Write-Summary { + param( + [string]$Hostname, + [hashtable]$OSAnalysis, + [array]$DataDriveSlots, + [string]$ServerStatus, + [bool]$IsNinja, + [array]$NewCriticalDrives + ) + + Write-Log "Storage Growth Analysis - $Hostname" + Write-Log ([char]0x2550 * 63) + Write-Log "" + + # OS Drive section + if ($OSAnalysis) { + Write-Log "OS DRIVE (Auto-detected: $($OSAnalysis.Letter))" + $labelDisplay = if ($OSAnalysis.VolumeLabel) { $OSAnalysis.VolumeLabel } else { $OSAnalysis.Letter } + Write-Log " Drive $($OSAnalysis.Letter) ($labelDisplay)" + + # Show current usage if we have data points + if ($OSAnalysis.CurrentUsedGB -gt 0 -or $OSAnalysis.CurrentFreeGB -gt 0) { + $usedStr = $OSAnalysis.CurrentUsedGB.ToString("F3") + $totalStr = $OSAnalysis.TotalSizeGB.ToString("F3") + $pctStr = $OSAnalysis.CurrentPercent.ToString("F2") + Write-Log " 
Current: $usedStr GB / $totalStr GB ($pctStr%)" + } + + $baseStatus = $OSAnalysis.Status -replace '\s*\(Limited\)', '' + if ($baseStatus -ne "Insufficient Data" -and $baseStatus -ne "Offline") { + if ($OSAnalysis.GBPerMonth -ne "Insufficient Data" -and $OSAnalysis.GBPerMonth -ne "OFFLINE") { + Write-Log " Growth: $($OSAnalysis.GBPerMonth) GB/month" + + $daysDisplay = $OSAnalysis.DaysUntilFull + if ($daysDisplay -eq "1825.00") { + $daysDisplay = "1825.00 days until full - capped" + } elseif ($daysDisplay -match '^\d') { + $daysDisplay = "$daysDisplay days until full" + } + Write-Log " Status: $($OSAnalysis.Status) ($daysDisplay)" + } + } + else { + Write-Log " Status: $($OSAnalysis.Status)" + } + } + + Write-Log "" + Write-Log "DATA DRIVES (Ranked by Criticality)" + Write-Log ([char]0x2500 * 63) + + for ($i = 0; $i -lt $Script:MAX_DATA_DRIVES; $i++) { + $slotNum = $i + 1 + $slot = if ($i -lt $DataDriveSlots.Count) { $DataDriveSlots[$i] } else { $null } + + if ($null -ne $slot -and $slot.Status -ne "NO DRIVE") { + $labelDisplay = if ($slot.VolumeLabel) { $slot.VolumeLabel } else { $slot.Letter } + Write-Log " [$slotNum] Drive $($slot.Letter) ($labelDisplay)" + + if ($slot.DriveStatus -eq "Offline") { + Write-Log " Status: OFFLINE" + } + elseif (($slot.Status -replace '\s*\(Limited\)', '') -eq "Insufficient Data") { + Write-Log " Status: $($slot.Status)" + } + else { + # Show current usage + if ($slot.CurrentUsedGB -gt 0 -or $slot.CurrentFreeGB -gt 0) { + $usedStr = ([double]$slot.CurrentUsedGB).ToString("F3") + $totalStr = ([double]$slot.TotalSizeGB).ToString("F3") + $pctStr = ([double]$slot.CurrentPercent).ToString("F2") + Write-Log " Current: $usedStr GB / $totalStr GB ($pctStr%)" + } + + Write-Log " Growth: $($slot.GBPerMonth) GB/month" + + $statusUpper = ($slot.Status -replace '\s*\(Limited\)', '').ToUpper() + $limitedTag = if ($slot.IsLimited) { " (Limited)" } else { "" } + $daysDisplay = $slot.DaysUntilFull + if ($daysDisplay -eq "1825.00") { + $daysDisplay = 
"1825.00 days until full - capped" + } elseif ($daysDisplay -match '^\d') { + $daysDisplay = "$daysDisplay days until full" + } + Write-Log " Status: $statusUpper$limitedTag ($daysDisplay)" + + # Alert indicator for newly critical drives + $baseSlotStatus = $slot.Status -replace '\s*\(Limited\)', '' + if ($baseSlotStatus -eq "Critical" -and $NewCriticalDrives) { + $isNewCritical = $slot.Letter -in @($NewCriticalDrives | ForEach-Object { $_.Letter }) + if ($isNewCritical) { + Write-Log " Alert: NEW - Event 5001 written" + } + } + } + } + else { + Write-Log " [$slotNum] NO DRIVE" + } + Write-Log "" + } + + Write-Log ([char]0x2550 * 63) + Write-Log "SERVER STATUS: $($ServerStatus.ToUpper())" + Write-Log "" +} + +# ============================================================================ +# MAIN EXECUTION (Section 12) +# ============================================================================ +function Main { + $exitCode = 0 + $hostname = $env:COMPUTERNAME + $runningInNinja = $null -ne (Get-Command "Ninja-Property-Set" -ErrorAction SilentlyContinue) + + # ── 1. INITIALIZE ──────────────────────────────────────────────────────── + + # Test mode banner (Section 13) + if (-not $runningInNinja) { + Write-Log "*** TEST MODE - Not running in Ninja context ***" + Write-Log "Ninja custom field updates will be skipped." + Write-Log "" + } + + # Verbose initialization info (Section 14.4) + Write-VerboseLog "PowerShell Version: $($PSVersionTable.PSVersion.ToString())" + Write-VerboseLog "Script Version: $($Script:VERSION)" + Write-VerboseLog "Storage Path: $($Script:STORAGE_PATH)" + + # Create storage folder (exit 1 on failure - Section 15.2) + if (-not (Test-Path $Script:STORAGE_PATH)) { + try { + New-Item -Path $Script:STORAGE_PATH -ItemType Directory -Force -ErrorAction Stop | Out-Null + Write-VerboseLog "Created storage folder: $($Script:STORAGE_PATH)" + } + catch { + Write-Log "CRITICAL: Cannot create storage folder: $_" + Write-Log "Script cannot proceed. Exiting." 
+ Save-LogFile + exit 1 + } + } + + # Register event log source (Section 11.3) + $eventLogAvailable = Initialize-EventSource + + # Load existing history + $history = Load-History + + # Capture previous excess-drive alert state for fire-once logic (Section 11.2) + $previousExcessAlertSent = [bool]$history.excessDriveAlertSent + + # ── 2. DISCOVER & COLLECT ──────────────────────────────────────────────── + + $currentDrives = $null + try { + $currentDrives = @(Get-FilteredDrives) + } + catch { + Write-Log "ERROR: Drive enumeration failed: $_" + Write-Log "Preserving existing data, skipping collection." + Save-History -History $history + Save-LogFile + exit 0 + } + + if ($currentDrives.Count -eq 0) { + Write-Log "WARNING: No qualifying drives found." + } + + # ── 3. UPDATE DRIVE STATUS ─────────────────────────────────────────────── + + $history = Update-History -History $history -CurrentDrives $currentDrives + + # ── 5-6. CALCULATE TRENDS ─────────────────────────────────────────────── + + $osAnalysis = $null + $dataAnalyses = [System.Collections.ArrayList]::new() + + foreach ($letter in @($history.drives.Keys)) { + $driveData = $history.drives[$letter] + $analysis = Get-DriveAnalysis -DriveData $driveData -DriveLetter $letter + + if ($driveData.driveType -eq "OS") { + $osAnalysis = $analysis + } + else { + [void]$dataAnalyses.Add($analysis) + } + } + + # ── 7. RANK & ORGANIZE ────────────────────────────────────────────────── + + $sortedDataDrives = @(Sort-DataDrives -Analyses $dataAnalyses) + + # ── 4. 
CHECK DRIVE COUNT (Section 10.3) ───────────────────────────────── + + $onlineDataCount = @($dataAnalyses | Where-Object { $_.DriveStatus -ne "Offline" }).Count + $excludedDrives = @() + $reportedDataDrives = $sortedDataDrives + + if ($sortedDataDrives.Count -gt $Script:MAX_DATA_DRIVES) { + $reportedDataDrives = @($sortedDataDrives[0..($Script:MAX_DATA_DRIVES - 1)]) + $excludedDrives = @($sortedDataDrives[$Script:MAX_DATA_DRIVES..($sortedDataDrives.Count - 1)]) + + Write-Log "NOTE: $($sortedDataDrives.Count) data drives detected, reporting top $($Script:MAX_DATA_DRIVES)" + foreach ($excl in $excludedDrives) { + Write-Log " Excluded: $($excl.Letter)" + } + } + + # Update excess-drive alert flag (Section 11.2 - Event 5002 fire-once) + if ($onlineDataCount -gt $Script:MAX_DATA_DRIVES) { + $history.excessDriveAlertSent = $true + } + else { + $history.excessDriveAlertSent = $false + } + + # Fire Event 5002 only on NEW transition (was false, now true) + if ($onlineDataCount -gt $Script:MAX_DATA_DRIVES -and -not $previousExcessAlertSent -and $eventLogAvailable) { + Write-ExcessDriveAlert -DriveCount $onlineDataCount -ExcludedDrives $excludedDrives -Hostname $hostname + } + + # Pad data drive slots to 3 (Section 10.2) + $dataSlots = [System.Collections.ArrayList]::new() + foreach ($d in $reportedDataDrives) { + [void]$dataSlots.Add($d) + } + while ($dataSlots.Count -lt $Script:MAX_DATA_DRIVES) { + [void]$dataSlots.Add(@{ + Letter = "NO DRIVE" + VolumeLabel = "" + Status = "NO DRIVE" + GBPerMonth = "NO DRIVE" + DaysUntilFull = "NO DRIVE" + DriveStatus = "NO DRIVE" + IsLimited = $false + CurrentUsedGB = 0 + CurrentFreeGB = 0 + CurrentPercent = 0 + TotalSizeGB = 0 + }) + } + + # Determine overall server status - worst case among online drives (Section 9.7) + $worstSeverity = 0 + $serverStatus = "Insufficient Data" + + $allOnlineAnalyses = @() + if ($osAnalysis -and $osAnalysis.DriveStatus -ne "Offline") { $allOnlineAnalyses += $osAnalysis } + $allOnlineAnalyses += 
@($dataAnalyses | Where-Object { $_.DriveStatus -ne "Offline" }) + + foreach ($analysis in $allOnlineAnalyses) { + $severity = Get-ServerStatusSeverity -Status $analysis.Status + if ($severity -gt $worstSeverity) { + $worstSeverity = $severity + $serverStatus = $analysis.Status -replace '\s*\(Limited\)', '' + } + } + + # ── 8. CHECK FOR CRITICAL - Fire-Once Logic (Section 11.2) ────────────── + + $newCriticalDrives = [System.Collections.ArrayList]::new() + + foreach ($letter in @($history.drives.Keys)) { + $driveData = $history.drives[$letter] + + # Find matching analysis result + $analysis = $null + if ($osAnalysis -and $osAnalysis.Letter -eq $letter) { + $analysis = $osAnalysis + } + else { + $analysis = $dataAnalyses | Where-Object { $_.Letter -eq $letter } | Select-Object -First 1 + } + + if (-not $analysis) { continue } + + $baseStatus = $analysis.Status -replace '\s*\(Limited\)', '' + + if ($baseStatus -eq "Critical") { + if (-not $driveData.alertSent) { + # Transition TO Critical - fire alert + [void]$newCriticalDrives.Add($analysis) + $driveData.alertSent = $true + Write-VerboseLog "Drive ${letter}: alertSent was FALSE, setting to TRUE, writing Event 5001" + } + else { + # Already alerted - skip + Write-VerboseLog "Drive ${letter}: alertSent was TRUE, skipping Event 5001" + } + } + else { + # No longer Critical - reset flag for next incident + if ($driveData.alertSent) { + $driveData.alertSent = $false + Write-VerboseLog "Drive ${letter}: No longer Critical, resetting alertSent to FALSE" + } + } + } + + # Write combined Event 5001 if any drives transitioned to Critical (Section 11.4) + if ($newCriticalDrives.Count -gt 0 -and $eventLogAvailable) { + Write-CriticalAlert -CriticalDrives $newCriticalDrives -Hostname $hostname + } + + # ── 9. 
PERSIST & REPORT ───────────────────────────────────────────────── + + # Console output summary + Write-Summary -Hostname $hostname -OSAnalysis $osAnalysis -DataDriveSlots $dataSlots ` + -ServerStatus $serverStatus -IsNinja $runningInNinja -NewCriticalDrives $newCriticalDrives + + # Update Ninja custom fields or show test mode message + if ($runningInNinja) { + $ninjaSuccess = Update-NinjaFields -ServerStatus $serverStatus -OSAnalysis $osAnalysis -DataDriveSlots $dataSlots + if ($ninjaSuccess) { + Write-Log ([char]0x2713 + " CUSTOM FIELDS FILLED") + Write-Log " - OS Drive: $(if ($osAnalysis) { $osAnalysis.Letter } else { 'NO DRIVE' })" + for ($i = 0; $i -lt $Script:MAX_DATA_DRIVES; $i++) { + $slot = $dataSlots[$i] + $display = if ($slot.Status -eq "NO DRIVE") { "NO DRIVE" } else { $slot.Letter } + Write-Log " - Data Drive $($i + 1): $display" + } + } + } + else { + Write-Log "*** TEST MODE - Ninja fields not updated ***" + } + + # Save history JSON + Save-History -History $history + + # Write log file with rotation + Save-LogFile + + exit $exitCode +} + +# ============================================================================ +# ENTRY POINT +# ============================================================================ +Main From 814ce475b5601bb30b2bf18ae7652f7b7be70d26 Mon Sep 17 00:00:00 2001 From: Zach Boogher <129975920+AlrightLad@users.noreply.github.com> Date: Thu, 12 Feb 2026 23:20:11 -0500 Subject: [PATCH 2/9] Add Ninja RMM field constants and improve timestamp parsing Added constants for Ninja RMM fields and improved timestamp parsing for better error handling. Updated functions to utilize new constants and enhanced backup recovery logic. 
--- rmm-ninja/ServerGrowthTracking | 305 +++++++++++++++++++++------------ 1 file changed, 196 insertions(+), 109 deletions(-) diff --git a/rmm-ninja/ServerGrowthTracking b/rmm-ninja/ServerGrowthTracking index 08a4b61..879ca7b 100644 --- a/rmm-ninja/ServerGrowthTracking +++ b/rmm-ninja/ServerGrowthTracking @@ -54,6 +54,16 @@ $Script:EXCLUDED_LABELS = @("Recovery", "EFI", "System Reserved", "SYSTEM", "Win $Script:EXCLUDED_FILESYSTEMS = @("FAT", "FAT32", "RAW") $Script:JSON_VERSION = "1.0" +# Ninja RMM field name constants (Section 10) +$Script:FIELD_SERVER_STATUS = "Server Storage Status" +$Script:FIELD_OS_STATUS = "OS Drive Status" +$Script:FIELD_OS_GROWTH = "OS Drive GB per Month" +$Script:FIELD_OS_DAYS = "OS Drive Days Until Full" +$Script:FIELD_DATA_LETTER = "Data Drive {0} Letter" +$Script:FIELD_DATA_STATUS = "Data Drive {0} Status" +$Script:FIELD_DATA_GROWTH = "Data Drive {0} GB per Month" +$Script:FIELD_DATA_DAYS = "Data Drive {0} Days Until Full" + # ============================================================================ # LOGGING # ============================================================================ @@ -144,6 +154,23 @@ function Save-LogFile { } } +# ============================================================================ +# SAFE TIMESTAMP PARSING HELPER +# ============================================================================ +function ConvertTo-SafeDateTime { + <# + .SYNOPSIS + Safely parses a timestamp string, returning $null on failure instead of throwing. 
+ #> + param([string]$Timestamp) + + $parsed = $null + if ([DateTime]::TryParse($Timestamp, [ref]$parsed)) { + return $parsed + } + return $null +} + # ============================================================================ # JSON PERSISTENCE (Section 6) # ============================================================================ @@ -157,6 +184,65 @@ function New-EmptyHistory { } } +function Import-HistoryFromFile { + <# + .SYNOPSIS + Attempts to parse a history JSON file. Returns $null on failure. + #> + param([string]$FilePath) + + if (-not (Test-Path $FilePath)) { return $null } + + try { + $content = Get-Content -Path $FilePath -Raw -Encoding UTF8 -ErrorAction Stop + $data = $content | ConvertFrom-Json -ErrorAction Stop + + if (-not $data.version -or -not $data.drives) { + throw "Invalid JSON structure - missing version or drives" + } + + $history = [ordered]@{ + version = $data.version + deviceId = if ($data.deviceId) { $data.deviceId } else { $env:COMPUTERNAME } + lastUpdated = if ($data.lastUpdated) { $data.lastUpdated } else { (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") } + excessDriveAlertSent = if ($null -ne $data.excessDriveAlertSent) { [bool]$data.excessDriveAlertSent } else { $false } + drives = [ordered]@{} + } + + foreach ($prop in $data.drives.PSObject.Properties) { + $driveLetter = $prop.Name + $driveData = $prop.Value + + $historyEntries = [System.Collections.ArrayList]::new() + if ($driveData.history) { + foreach ($entry in $driveData.history) { + [void]$historyEntries.Add([ordered]@{ + timestamp = $entry.timestamp + usedGB = [double]$entry.usedGB + freeGB = [double]$entry.freeGB + usagePercent = [double]$entry.usagePercent + }) + } + } + + $history.drives[$driveLetter] = [ordered]@{ + volumeLabel = if ($driveData.volumeLabel) { $driveData.volumeLabel } else { "" } + totalSizeGB = [double]$driveData.totalSizeGB + driveType = if ($driveData.driveType) { $driveData.driveType } else { "Data" } + alertSent = if ($null -ne $driveData.alertSent) 
{ [bool]$driveData.alertSent } else { $false } + status = if ($driveData.status) { $driveData.status } else { "Online" } + lastSeen = if ($driveData.lastSeen) { $driveData.lastSeen } else { (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") } + history = $historyEntries + } + } + + return $history + } + catch { + return $null + } +} + function Load-History { $maxRetries = 3 $retryDelay = 5 @@ -170,77 +256,44 @@ function Load-History { Write-VerboseLog "Existing JSON: Yes ($([math]::Round($fileInfo.Length / 1KB)) KB)" for ($attempt = 1; $attempt -le $maxRetries; $attempt++) { - try { - $content = Get-Content -Path $Script:HISTORY_FILE -Raw -Encoding UTF8 -ErrorAction Stop - $data = $content | ConvertFrom-Json -ErrorAction Stop - - # Validate JSON structure - if (-not $data.version -or -not $data.drives) { - throw "Invalid JSON structure - missing version or drives" - } - - # Convert PSCustomObject to ordered hashtable for manipulation - $history = [ordered]@{ - version = $data.version - deviceId = if ($data.deviceId) { $data.deviceId } else { $env:COMPUTERNAME } - lastUpdated = if ($data.lastUpdated) { $data.lastUpdated } else { (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") } - excessDriveAlertSent = if ($null -ne $data.excessDriveAlertSent) { [bool]$data.excessDriveAlertSent } else { $false } - drives = [ordered]@{} - } - - foreach ($prop in $data.drives.PSObject.Properties) { - $driveLetter = $prop.Name - $driveData = $prop.Value - - $historyEntries = [System.Collections.ArrayList]::new() - if ($driveData.history) { - foreach ($entry in $driveData.history) { - [void]$historyEntries.Add([ordered]@{ - timestamp = $entry.timestamp - usedGB = [double]$entry.usedGB - freeGB = [double]$entry.freeGB - usagePercent = [double]$entry.usagePercent - }) - } - } - - $history.drives[$driveLetter] = [ordered]@{ - volumeLabel = if ($driveData.volumeLabel) { $driveData.volumeLabel } else { "" } - totalSizeGB = [double]$driveData.totalSizeGB - driveType = if ($driveData.driveType) { 
$driveData.driveType } else { "Data" } - alertSent = if ($null -ne $driveData.alertSent) { [bool]$driveData.alertSent } else { $false } - status = if ($driveData.status) { $driveData.status } else { "Online" } - lastSeen = if ($driveData.lastSeen) { $driveData.lastSeen } else { (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") } - history = $historyEntries - } - } + $result = Import-HistoryFromFile -FilePath $Script:HISTORY_FILE + if ($null -ne $result) { + return $result + } - return $history + if ($attempt -lt $maxRetries) { + Write-Log "WARNING: Failed to load history (attempt $attempt/$maxRetries), retrying in ${retryDelay}s..." + Start-Sleep -Seconds $retryDelay } - catch { - if ($attempt -lt $maxRetries) { - Write-Log "WARNING: Failed to load history (attempt $attempt/$maxRetries), retrying in ${retryDelay}s..." - Start-Sleep -Seconds $retryDelay - } - else { - Write-Log "WARNING: History file corrupted after $maxRetries attempts. Renaming and starting fresh." - $corruptedPath = $Script:HISTORY_FILE + ".corrupted" - try { - Move-Item -Path $Script:HISTORY_FILE -Destination $corruptedPath -Force -ErrorAction Stop - } - catch { - Write-Log "WARNING: Could not rename corrupted file: $_" - } - return New-EmptyHistory - } + } + + # Primary file corrupted after all retries - attempt backup recovery + if (Test-Path $Script:BACKUP_FILE) { + Write-Log "WARNING: Primary history corrupted. Attempting backup recovery..." + $backupResult = Import-HistoryFromFile -FilePath $Script:BACKUP_FILE + if ($null -ne $backupResult) { + Write-Log "Backup recovery successful - restored from $($Script:BACKUP_FILE)" + return $backupResult } + Write-Log "WARNING: Backup file also corrupted." + } + + # Both primary and backup failed - rename corrupted file and start fresh + Write-Log "WARNING: History unrecoverable after $maxRetries attempts. Renaming and starting fresh." 
+ $corruptedPath = $Script:HISTORY_FILE + ".corrupted" + try { + Move-Item -Path $Script:HISTORY_FILE -Destination $corruptedPath -Force -ErrorAction Stop } + catch { + Write-Log "WARNING: Could not rename corrupted file: $_" + } + return New-EmptyHistory } function Save-History { param([hashtable]$History) - # Backup existing file + # Backup existing file before overwriting if (Test-Path $Script:HISTORY_FILE) { try { Copy-Item -Path $Script:HISTORY_FILE -Destination $Script:BACKUP_FILE -Force -ErrorAction Stop @@ -250,13 +303,15 @@ function Save-History { } } - # Update lastUpdated $History.lastUpdated = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") + # Atomic write: write to temp file first, then rename to prevent corruption + $tempFile = $Script:HISTORY_FILE + ".tmp" + try { - $History | ConvertTo-Json -Depth 10 | Set-Content -Path $Script:HISTORY_FILE -Encoding UTF8 -ErrorAction Stop + $History | ConvertTo-Json -Depth 10 | Set-Content -Path $tempFile -Encoding UTF8 -ErrorAction Stop + Move-Item -Path $tempFile -Destination $Script:HISTORY_FILE -Force -ErrorAction Stop - # Count total data points $totalPoints = 0 $driveCount = 0 foreach ($drive in $History.drives.Values) { @@ -267,6 +322,10 @@ function Save-History { } catch { Write-Log "ERROR: Failed to save history file: $_" + # Clean up temp file if it exists + if (Test-Path $tempFile) { + Remove-Item -Path $tempFile -Force -ErrorAction SilentlyContinue + } } } @@ -431,12 +490,16 @@ function Update-History { usagePercent = $drive.UsagePercent }) - # Prune entries older than 65-day retention window + # Prune entries older than 65-day retention window (defensive parsing) $driveHistory = $History.drives[$letter].history $beforeCount = $driveHistory.Count $prunedHistory = [System.Collections.ArrayList]::new() foreach ($entry in $driveHistory) { - $entryDate = [DateTime]::Parse($entry.timestamp) + $entryDate = ConvertTo-SafeDateTime -Timestamp $entry.timestamp + if ($null -eq $entryDate) { + Write-VerboseLog "Drive 
${letter}: Skipping entry with unparseable timestamp: $($entry.timestamp)" + continue + } if ($entryDate -ge $cutoffDate) { [void]$prunedHistory.Add($entry) } @@ -467,13 +530,18 @@ function Update-History { $driveData.status = "Offline" } - # Remove if offline > 30 days (Section 7.1) + # Remove if offline > 30 days (Section 7.1) - defensive parsing if ($driveData.lastSeen) { - $lastSeenDate = [DateTime]::Parse($driveData.lastSeen) - $daysOffline = ((Get-Date) - $lastSeenDate).TotalDays - if ($daysOffline -gt $Script:OFFLINE_REMOVAL_DAYS) { - Write-Log "Drive ${letter}: Offline for $([math]::Round($daysOffline, 0)) days - removing from history" - [void]$drivesToRemove.Add($letter) + $lastSeenDate = ConvertTo-SafeDateTime -Timestamp $driveData.lastSeen + if ($null -ne $lastSeenDate) { + $daysOffline = ((Get-Date) - $lastSeenDate).TotalDays + if ($daysOffline -gt $Script:OFFLINE_REMOVAL_DAYS) { + Write-Log "Drive ${letter}: Offline for $([math]::Round($daysOffline, 0)) days - removing from history" + [void]$drivesToRemove.Add($letter) + } + } + else { + Write-VerboseLog "Drive ${letter}: Unparseable lastSeen timestamp: $($driveData.lastSeen)" } } } @@ -499,17 +567,25 @@ function Get-LinearRegression { return @{ Slope = 0; Intercept = 0; RSquared = 0 } } - # Convert timestamps to days from first measurement (Section 8.1) - $firstTimestamp = [DateTime]::Parse($HistoryData[0].timestamp) + # Convert timestamps to days from first measurement (Section 8.1) - defensive parsing + $firstTimestamp = ConvertTo-SafeDateTime -Timestamp $HistoryData[0].timestamp + if ($null -eq $firstTimestamp) { + Write-VerboseLog "Linear regression: Cannot parse first timestamp, returning zero slope" + return @{ Slope = 0; Intercept = 0; RSquared = 0 } + } $sumX = 0.0 $sumY = 0.0 $sumXY = 0.0 $sumX2 = 0.0 $sumY2 = 0.0 + $validPoints = 0 foreach ($point in $HistoryData) { - $x = ([DateTime]::Parse($point.timestamp) - $firstTimestamp).TotalDays + $pointDate = ConvertTo-SafeDateTime -Timestamp 
$point.timestamp + if ($null -eq $pointDate) { continue } + + $x = ($pointDate - $firstTimestamp).TotalDays $y = [double]$point.usedGB $sumX += $x @@ -517,23 +593,31 @@ function Get-LinearRegression { $sumXY += ($x * $y) $sumX2 += ($x * $x) $sumY2 += ($y * $y) + $validPoints++ + } + + if ($validPoints -lt 2) { + return @{ Slope = 0; Intercept = 0; RSquared = 0 } } # OLS formula (Section 8.3) - $denominator = ($n * $sumX2) - ($sumX * $sumX) + $denominator = ($validPoints * $sumX2) - ($sumX * $sumX) if ([math]::Abs($denominator) -lt 1e-10) { - return @{ Slope = 0; Intercept = $sumY / $n; RSquared = 0 } + return @{ Slope = 0; Intercept = $sumY / $validPoints; RSquared = 0 } } - $slope = (($n * $sumXY) - ($sumX * $sumY)) / $denominator - $intercept = ($sumY - ($slope * $sumX)) / $n + $slope = (($validPoints * $sumXY) - ($sumX * $sumY)) / $denominator + $intercept = ($sumY - ($slope * $sumX)) / $validPoints # Calculate R-squared for trend confidence - $meanY = $sumY / $n - $ssTot = $sumY2 - ($n * $meanY * $meanY) + $meanY = $sumY / $validPoints + $ssTot = $sumY2 - ($validPoints * $meanY * $meanY) $ssRes = 0.0 foreach ($point in $HistoryData) { - $x = ([DateTime]::Parse($point.timestamp) - $firstTimestamp).TotalDays + $pointDate = ConvertTo-SafeDateTime -Timestamp $point.timestamp + if ($null -eq $pointDate) { continue } + + $x = ($pointDate - $firstTimestamp).TotalDays $y = [double]$point.usedGB $predicted = $slope * $x + $intercept $ssRes += ($y - $predicted) * ($y - $predicted) @@ -624,7 +708,6 @@ function Get-DriveAnalysis { $result.DaysUntilFull = "No Growth" } else { $result.DaysUntilFull = "Declining" - $result.GBPerMonth = $monthlyGrowth.ToString("F3") } } else { $daysUntilFull = $currentFreeGB / $dailyGrowth @@ -820,18 +903,18 @@ function Update-NinjaFields { try { # Overall Status (Section 10.1) - Ninja-Property-Set "Server Storage Status" $ServerStatus + Ninja-Property-Set $Script:FIELD_SERVER_STATUS $ServerStatus # OS Drive fields if ($OSAnalysis) { - 
Ninja-Property-Set "OS Drive Status" $OSAnalysis.Status - Ninja-Property-Set "OS Drive GB per Month" $OSAnalysis.GBPerMonth - Ninja-Property-Set "OS Drive Days Until Full" $OSAnalysis.DaysUntilFull + Ninja-Property-Set $Script:FIELD_OS_STATUS $OSAnalysis.Status + Ninja-Property-Set $Script:FIELD_OS_GROWTH $OSAnalysis.GBPerMonth + Ninja-Property-Set $Script:FIELD_OS_DAYS $OSAnalysis.DaysUntilFull } else { - Ninja-Property-Set "OS Drive Status" "NO DRIVE" - Ninja-Property-Set "OS Drive GB per Month" "NO DRIVE" - Ninja-Property-Set "OS Drive Days Until Full" "NO DRIVE" + Ninja-Property-Set $Script:FIELD_OS_STATUS "NO DRIVE" + Ninja-Property-Set $Script:FIELD_OS_GROWTH "NO DRIVE" + Ninja-Property-Set $Script:FIELD_OS_DAYS "NO DRIVE" } # Data Drive 1-3 fields (Section 10.4) @@ -839,6 +922,11 @@ function Update-NinjaFields { $slotNum = $i + 1 $slot = if ($i -lt $DataDriveSlots.Count) { $DataDriveSlots[$i] } else { $null } + $letterField = $Script:FIELD_DATA_LETTER -f $slotNum + $statusField = $Script:FIELD_DATA_STATUS -f $slotNum + $growthField = $Script:FIELD_DATA_GROWTH -f $slotNum + $daysField = $Script:FIELD_DATA_DAYS -f $slotNum + if ($null -ne $slot -and $slot.Status -ne "NO DRIVE") { # Letter display: strip colon for Ninja, add (OFFLINE) if offline $letterDisplay = if ($slot.DriveStatus -eq "Offline") { @@ -847,17 +935,17 @@ function Update-NinjaFields { $slot.Letter -replace ':$', '' } - Ninja-Property-Set "Data Drive $slotNum Letter" $letterDisplay - Ninja-Property-Set "Data Drive $slotNum Status" $slot.Status - Ninja-Property-Set "Data Drive $slotNum GB per Month" $slot.GBPerMonth - Ninja-Property-Set "Data Drive $slotNum Days Until Full" $slot.DaysUntilFull + Ninja-Property-Set $letterField $letterDisplay + Ninja-Property-Set $statusField $slot.Status + Ninja-Property-Set $growthField $slot.GBPerMonth + Ninja-Property-Set $daysField $slot.DaysUntilFull } else { # Empty slot (Section 10.2) - Ninja-Property-Set "Data Drive $slotNum Letter" "NO DRIVE" - 
Ninja-Property-Set "Data Drive $slotNum Status" "NO DRIVE" - Ninja-Property-Set "Data Drive $slotNum GB per Month" "NO DRIVE" - Ninja-Property-Set "Data Drive $slotNum Days Until Full" "NO DRIVE" + Ninja-Property-Set $letterField "NO DRIVE" + Ninja-Property-Set $statusField "NO DRIVE" + Ninja-Property-Set $growthField "NO DRIVE" + Ninja-Property-Set $daysField "NO DRIVE" } } @@ -983,11 +1071,10 @@ function Write-Summary { # MAIN EXECUTION (Section 12) # ============================================================================ function Main { - $exitCode = 0 $hostname = $env:COMPUTERNAME $runningInNinja = $null -ne (Get-Command "Ninja-Property-Set" -ErrorAction SilentlyContinue) - # ── 1. INITIALIZE ──────────────────────────────────────────────────────── + # ── Step 1: Initialize ─────────────────────────────────────────────────── # Test mode banner (Section 13) if (-not $runningInNinja) { @@ -1024,7 +1111,7 @@ function Main { # Capture previous excess-drive alert state for fire-once logic (Section 11.2) $previousExcessAlertSent = [bool]$history.excessDriveAlertSent - # ── 2. DISCOVER & COLLECT ──────────────────────────────────────────────── + # ── Step 2: Discover & Collect ─────────────────────────────────────────── $currentDrives = $null try { @@ -1042,11 +1129,11 @@ function Main { Write-Log "WARNING: No qualifying drives found." } - # ── 3. UPDATE DRIVE STATUS ─────────────────────────────────────────────── + # ── Step 3: Update Drive Status ────────────────────────────────────────── $history = Update-History -History $history -CurrentDrives $currentDrives - # ── 5-6. CALCULATE TRENDS ─────────────────────────────────────────────── + # ── Step 4: Calculate Trends ───────────────────────────────────────────── $osAnalysis = $null $dataAnalyses = [System.Collections.ArrayList]::new() @@ -1063,11 +1150,11 @@ function Main { } } - # ── 7. 
RANK & ORGANIZE ────────────────────────────────────────────────── + # ── Step 5: Rank & Organize ────────────────────────────────────────────── $sortedDataDrives = @(Sort-DataDrives -Analyses $dataAnalyses) - # ── 4. CHECK DRIVE COUNT (Section 10.3) ───────────────────────────────── + # ── Step 6: Check Drive Count (Section 10.3) ──────────────────────────── $onlineDataCount = @($dataAnalyses | Where-Object { $_.DriveStatus -ne "Offline" }).Count $excludedDrives = @() @@ -1133,7 +1220,7 @@ function Main { } } - # ── 8. CHECK FOR CRITICAL - Fire-Once Logic (Section 11.2) ────────────── + # ── Step 7: Check Critical - Fire-Once Logic (Section 11.2) ───────────── $newCriticalDrives = [System.Collections.ArrayList]::new() @@ -1179,7 +1266,7 @@ function Main { Write-CriticalAlert -CriticalDrives $newCriticalDrives -Hostname $hostname } - # ── 9. PERSIST & REPORT ───────────────────────────────────────────────── + # ── Step 8: Persist & Report ───────────────────────────────────────────── # Console output summary Write-Summary -Hostname $hostname -OSAnalysis $osAnalysis -DataDriveSlots $dataSlots ` @@ -1208,7 +1295,7 @@ function Main { # Write log file with rotation Save-LogFile - exit $exitCode + exit 0 } # ============================================================================ From abef598e743a18ee15d2e946f023259545c6f24a Mon Sep 17 00:00:00 2001 From: Zach Boogher <129975920+AlrightLad@users.noreply.github.com> Date: Thu, 12 Feb 2026 23:42:13 -0500 Subject: [PATCH 3/9] Improve history file loading with retry logic Enhanced error handling for history file loading and parsing, including retry logic for I/O errors and improved logging messages. 
--- rmm-ninja/ServerGrowthTracking | 44 ++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/rmm-ninja/ServerGrowthTracking b/rmm-ninja/ServerGrowthTracking index 879ca7b..586d145 100644 --- a/rmm-ninja/ServerGrowthTracking +++ b/rmm-ninja/ServerGrowthTracking @@ -239,14 +239,12 @@ function Import-HistoryFromFile { return $history } catch { + Write-VerboseLog "Import-HistoryFromFile: Failed to parse '$FilePath': $_" return $null } } function Load-History { - $maxRetries = 3 - $retryDelay = 5 - if (-not (Test-Path $Script:HISTORY_FILE)) { Write-VerboseLog "Existing JSON: No - creating new history" return New-EmptyHistory @@ -255,21 +253,39 @@ function Load-History { $fileInfo = Get-Item $Script:HISTORY_FILE -ErrorAction SilentlyContinue Write-VerboseLog "Existing JSON: Yes ($([math]::Round($fileInfo.Length / 1KB)) KB)" + # Attempt to read and parse, retrying only on I/O errors + $maxRetries = 3 + $retryDelay = 5 + $content = $null + for ($attempt = 1; $attempt -le $maxRetries; $attempt++) { + try { + $content = Get-Content -Path $Script:HISTORY_FILE -Raw -Encoding UTF8 -ErrorAction Stop + break # I/O succeeded, move on to parsing + } + catch { + Write-Log "WARNING: Failed to read history file (attempt $attempt/$maxRetries): $_" + if ($attempt -lt $maxRetries) { + Start-Sleep -Seconds $retryDelay + } + } + } + + # If I/O succeeded, attempt parse (no retry - parse errors are not transient) + if ($null -ne $content) { $result = Import-HistoryFromFile -FilePath $Script:HISTORY_FILE if ($null -ne $result) { return $result } - - if ($attempt -lt $maxRetries) { - Write-Log "WARNING: Failed to load history (attempt $attempt/$maxRetries), retrying in ${retryDelay}s..." - Start-Sleep -Seconds $retryDelay - } + Write-Log "WARNING: Primary history file failed to parse." + } + else { + Write-Log "WARNING: Could not read primary history file after $maxRetries attempts." 
}
 
-    # Primary file corrupted after all retries - attempt backup recovery
+    # Primary file failed - attempt backup recovery
     if (Test-Path $Script:BACKUP_FILE) {
-        Write-Log "WARNING: Primary history corrupted. Attempting backup recovery..."
+        Write-Log "WARNING: Attempting backup recovery..."
         $backupResult = Import-HistoryFromFile -FilePath $Script:BACKUP_FILE
         if ($null -ne $backupResult) {
             Write-Log "Backup recovery successful - restored from $($Script:BACKUP_FILE)"
@@ -279,7 +295,7 @@ function Load-History {
     }
 
     # Both primary and backup failed - rename corrupted file and start fresh
-    Write-Log "WARNING: History unrecoverable after $maxRetries attempts. Renaming and starting fresh."
+    Write-Log "WARNING: History unrecoverable. Renaming and starting fresh."
     $corruptedPath = $Script:HISTORY_FILE + ".corrupted"
     try {
         Move-Item -Path $Script:HISTORY_FILE -Destination $corruptedPath -Force -ErrorAction Stop
@@ -512,8 +528,10 @@ function Update-History {
         }
 
         if ($prunedHistory.Count -gt 0) {
-            $oldest = $prunedHistory[0].timestamp.Substring(0, 10)
-            $newest = $prunedHistory[$prunedHistory.Count - 1].timestamp.Substring(0, 10)
+            $oldestTs = $prunedHistory[0].timestamp
+            $newestTs = $prunedHistory[$prunedHistory.Count - 1].timestamp
+            $oldest = if ($oldestTs.Length -ge 10) { $oldestTs.Substring(0, 10) } else { $oldestTs }
+            $newest = if ($newestTs.Length -ge 10) { $newestTs.Substring(0, 10) } else { $newestTs }
             Write-VerboseLog "Drive ${letter}: History - $($prunedHistory.Count) points loaded, $afterCount after pruning (oldest: $oldest, newest: $newest)"
         }
     }

From 68013a95d30e038cd9c63d3f52c417c1e920eaba Mon Sep 17 00:00:00 2001
From: Zach Boogher <129975920+AlrightLad@users.noreply.github.com>
Date: Thu, 12 Feb 2026 23:54:28 -0500
Subject: [PATCH 4/9] Refactor log file management and JSON import functions

**Address all CodeRabbit review feedback for Storage Growth Monitor**

This change resolves 
all actionable issues and nitpicks raised across three rounds of CodeRabbit review on the Storage Growth Monitor script (Ticket 1123004). **Robustness improvements:** - **Safe timestamp parsing:** Introduced `ConvertTo-SafeDateTime` helper using `TryParse` instead of `[DateTime]::Parse()` throughout the script, preventing script termination on malformed persisted timestamps. Applied to history pruning, offline drive removal, and linear regression calculations. - **Substring length guard:** Added length check before `.Substring(0, 10)` on timestamps in the history pruning log output, preventing `ArgumentOutOfRangeException` on unexpectedly short values. - **Backup recovery:** `Load-History` now checks for a backup file and attempts restoration before falling back to a fresh history when the primary file is corrupted. **Architecture improvements:** - **Eliminated redundant I/O in Load-History:** Extracted `ConvertFrom-HistoryJson` to accept raw JSON content. `Load-History` now reads the file with retry protection for transient I/O errors, then passes the already-read content directly to parsing (no retry for parse errors, which are not transient). Previously, the retry loop read `$content` but `Import-HistoryFromFile` independently re-opened the file, discarding the retried read entirely. - **Import-HistoryFromFile** remains as a thin read-and-parse wrapper used by backup recovery, delegating parsing to `ConvertFrom-HistoryJson`. - **Ninja RMM field constants:** Defined all 16 field names as `$Script:FIELD_*` constants instead of inline string literals, reducing fragility and making field name changes single-point edits. **Reliability improvements:** - **Atomic write fallback:** `Save-History` uses temp file + `Move-Item` for atomic writes. If `Move-Item` fails after the temp file is successfully written, it now falls back to a direct `Set-Content` write so the new data point isn't lost, then cleans up the temp file. 
- **Log file performance:** `Save-LogFile` switched from read-prune-rewrite on every run to append-only (`Add-Content`). The expensive prune cycle only triggers when the log file exceeds 256KB, which is appropriate given the script runs daily and generates ~20-50 lines per execution. - **Diagnostic logging in Import-HistoryFromFile:** The catch block now logs parse failure details via `Write-VerboseLog` instead of silently returning `$null`. **Correctness fix:** - **Drive enumeration exit code:** Changed from `exit 0` to `exit 2` when `Get-FilteredDrives` throws an exception, so Ninja RMM can detect non-transient failures instead of treating them as successful runs. --- rmm-ninja/ServerGrowthTracking | 105 +++++++++++++++++++++------------ 1 file changed, 66 insertions(+), 39 deletions(-) diff --git a/rmm-ninja/ServerGrowthTracking b/rmm-ninja/ServerGrowthTracking index 586d145..a7c289b 100644 --- a/rmm-ninja/ServerGrowthTracking +++ b/rmm-ninja/ServerGrowthTracking @@ -113,41 +113,40 @@ function Write-VerboseLog { # LOG FILE MANAGEMENT (Section 14) # ============================================================================ function Save-LogFile { + $Script:LOG_PRUNE_THRESHOLD_KB = 256 + try { - $existingLines = @() - if (Test-Path $Script:LOG_FILE) { - $existingLines = @(Get-Content -Path $Script:LOG_FILE -ErrorAction SilentlyContinue) + # Append new entries (fast path - no read required) + if ($Script:LogBuffer.Count -gt 0) { + $Script:LogBuffer | Add-Content -Path $Script:LOG_FILE -Encoding UTF8 -ErrorAction Stop } - # Prune entries older than 90 days - $cutoff = (Get-Date).AddDays(-$Script:LOG_RETENTION_DAYS) - $prunedLines = [System.Collections.ArrayList]::new() - $removedCount = 0 - - foreach ($line in $existingLines) { - if ($line -match '^\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})') { - $parsedDate = $null - if ([DateTime]::TryParseExact($Matches[1], "yyyy-MM-dd HH:mm:ss", $null, [System.Globalization.DateTimeStyles]::None, [ref]$parsedDate)) { - if 
($parsedDate -lt $cutoff) { - $removedCount++ - continue + # Prune only when file exceeds size threshold to avoid read-rewrite on every run + $fileInfo = Get-Item $Script:LOG_FILE -ErrorAction SilentlyContinue + if ($fileInfo -and ($fileInfo.Length / 1KB) -gt $Script:LOG_PRUNE_THRESHOLD_KB) { + $cutoff = (Get-Date).AddDays(-$Script:LOG_RETENTION_DAYS) + $existingLines = @(Get-Content -Path $Script:LOG_FILE -ErrorAction Stop) + $prunedLines = [System.Collections.ArrayList]::new() + $removedCount = 0 + + foreach ($line in $existingLines) { + if ($line -match '^\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})') { + $parsedDate = $null + if ([DateTime]::TryParseExact($Matches[1], "yyyy-MM-dd HH:mm:ss", $null, [System.Globalization.DateTimeStyles]::None, [ref]$parsedDate)) { + if ($parsedDate -lt $cutoff) { + $removedCount++ + continue + } } } + [void]$prunedLines.Add($line) } - # Keep line if timestamp unparseable or within retention - [void]$prunedLines.Add($line) - } - if ($removedCount -gt 0) { - Write-VerboseLog "Log file pruning: $removedCount entries removed (older than $($Script:LOG_RETENTION_DAYS) days)" - } - - # Append new entries - foreach ($line in $Script:LogBuffer) { - [void]$prunedLines.Add($line) + if ($removedCount -gt 0) { + $prunedLines | Set-Content -Path $Script:LOG_FILE -Encoding UTF8 -ErrorAction Stop + Write-VerboseLog "Log file pruning: $removedCount entries removed (older than $($Script:LOG_RETENTION_DAYS) days)" + } } - - $prunedLines | Set-Content -Path $Script:LOG_FILE -Encoding UTF8 -ErrorAction Stop } catch { Write-Host "$(Get-TimestampString) ERROR: Failed to write log file: $_" @@ -184,18 +183,15 @@ function New-EmptyHistory { } } -function Import-HistoryFromFile { +function ConvertFrom-HistoryJson { <# .SYNOPSIS - Attempts to parse a history JSON file. Returns $null on failure. + Parses raw JSON content into a history hashtable. Returns $null on failure. 
#> - param([string]$FilePath) - - if (-not (Test-Path $FilePath)) { return $null } + param([string]$JsonContent, [string]$SourceLabel = "unknown") try { - $content = Get-Content -Path $FilePath -Raw -Encoding UTF8 -ErrorAction Stop - $data = $content | ConvertFrom-Json -ErrorAction Stop + $data = $JsonContent | ConvertFrom-Json -ErrorAction Stop if (-not $data.version -or -not $data.drives) { throw "Invalid JSON structure - missing version or drives" @@ -239,7 +235,26 @@ function Import-HistoryFromFile { return $history } catch { - Write-VerboseLog "Import-HistoryFromFile: Failed to parse '$FilePath': $_" + Write-VerboseLog "ConvertFrom-HistoryJson: Failed to parse content from $SourceLabel`: $_" + return $null + } +} + +function Import-HistoryFromFile { + <# + .SYNOPSIS + Reads a history JSON file from disk and parses it. Returns $null on failure. + #> + param([string]$FilePath) + + if (-not (Test-Path $FilePath)) { return $null } + + try { + $content = Get-Content -Path $FilePath -Raw -Encoding UTF8 -ErrorAction Stop + return ConvertFrom-HistoryJson -JsonContent $content -SourceLabel $FilePath + } + catch { + Write-VerboseLog "Import-HistoryFromFile: Failed to read '$FilePath': $_" return $null } } @@ -273,7 +288,7 @@ function Load-History { # If I/O succeeded, attempt parse (no retry - parse errors are not transient) if ($null -ne $content) { - $result = Import-HistoryFromFile -FilePath $Script:HISTORY_FILE + $result = ConvertFrom-HistoryJson -JsonContent $content -SourceLabel $Script:HISTORY_FILE if ($null -ne $result) { return $result } @@ -325,8 +340,20 @@ function Save-History { $tempFile = $Script:HISTORY_FILE + ".tmp" try { - $History | ConvertTo-Json -Depth 10 | Set-Content -Path $tempFile -Encoding UTF8 -ErrorAction Stop - Move-Item -Path $tempFile -Destination $Script:HISTORY_FILE -Force -ErrorAction Stop + $jsonOutput = $History | ConvertTo-Json -Depth 10 + $jsonOutput | Set-Content -Path $tempFile -Encoding UTF8 -ErrorAction Stop + + try { + Move-Item 
-Path $tempFile -Destination $Script:HISTORY_FILE -Force -ErrorAction Stop + } + catch { + # Move failed - fall back to direct write so new data isn't lost + Write-VerboseLog "Atomic rename failed, falling back to direct write: $_" + $jsonOutput | Set-Content -Path $Script:HISTORY_FILE -Encoding UTF8 -ErrorAction Stop + if (Test-Path $tempFile) { + Remove-Item -Path $tempFile -Force -ErrorAction SilentlyContinue + } + } $totalPoints = 0 $driveCount = 0 @@ -1140,7 +1167,7 @@ function Main { Write-Log "Preserving existing data, skipping collection." Save-History -History $history Save-LogFile - exit 0 + exit 2 } if ($currentDrives.Count -eq 0) { From 64109a2cb0165f5eef56a2c44924be003efaff75 Mon Sep 17 00:00:00 2001 From: Zach Boogher <129975920+AlrightLad@users.noreply.github.com> Date: Mon, 16 Feb 2026 13:36:45 -0500 Subject: [PATCH 5/9] Fix formatting of the Main entry point MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Robustness improvements from code review All 14 CodeRabbit review findings (3 rounds) have been addressed: Safe timestamp parsing — ConvertTo-SafeDateTime helper using TryParse replaces all [DateTime]::Parse() calls, preventing script termination on malformed persisted timestamps. Substring length guard — Length check before .Substring(0, 10) on timestamps prevents ArgumentOutOfRangeException on short values. Backup recovery — Load-History checks for a backup file and attempts restoration before falling back to a fresh history when the primary file is corrupted. Eliminated redundant I/O — Extracted ConvertFrom-HistoryJson to accept raw JSON content. Load-History reads the file with retry protection for transient I/O errors, then passes content directly to parsing (no retry for parse errors, which are not transient). Ninja RMM field constants — All 16 field names defined as $Script:FIELD_* constants instead of inline string literals. Atomic write with fallback — Save-History uses temp file + Move-Item. 
If the rename fails, it falls back to a direct Set-Content write so the data point isn't lost. Log file performance — Save-LogFile uses append-only (Add-Content). The expensive prune cycle only triggers when the log exceeds 256 KB. Diagnostic logging — Import-HistoryFromFile catch block logs parse failure details via Write-VerboseLog instead of silently returning $null. Correct exit code — Drive enumeration failure exits with code 2 instead of 0, so Ninja RMM can detect non-transient failures. From cc1d179d1b2c48deefe710851a5357a8a4c63bab Mon Sep 17 00:00:00 2001 From: Zach Boogher <129975920+AlrightLad@users.noreply.github.com> Date: Wed, 25 Feb 2026 02:09:58 -0500 Subject: [PATCH 6/9] Add Storage Growth Monitor with SQLite persistence and visual HTML dashboard MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Summary Introduces a PowerShell script that runs as a daily Ninja RMM scheduled task on servers (physical Hyper-V hosts and VMs). It collects disk usage metrics, persists them to a shared SQLite database at %PROGRAMDATA%\dtc-inc\rmm\dtc-rmm.db, calculates storage growth trends via OLS linear regression, and writes a single HTML dashboard — complete with an SVG line chart — to one Ninja RMM WYSIWYG custom field (storageReport). The SQLite database is designed as a shared data store that other scripts and tools can query directly. The schema uses PostgreSQL-compatible conventions (snake_case columns, standard SQL types, parameterized queries) to enable a future migration to a centralized API service backed by PostgreSQL. What the script does 1. Drive Discovery Enumerates fixed drives via Win32_LogicalDisk and Win32_Volume, auto-detects the OS drive, and filters out recovery/EFI/system partitions, excluded filesystems (FAT/RAW), and drives under 1 GB. 2. SQLite Persistence Stores all data in %PROGRAMDATA%\dtc-inc\rmm\dtc-rmm.db using the PSSQLite module (auto-installed from PSGallery if not present). 
WAL journal mode is enabled for safe concurrent reads by other tools. Schema (4 tables): Table Purpose device One row per monitored machine (hostname, OS version, timestamps) drive One row per drive per device (letter, label, size, type OS/Data, online/offline status, alert state) metric Time-series data points — used GB, free GB, usage % — the core trend data alert_state Fire-once alert tracking per device (scaffolding for future centralized use) Indexes on (drive_id, timestamp) and (device_id) for efficient range queries. All SQL is parameterized — zero string concatenation. 3. Data Lifecycle 65-day rolling retention window with automatic metric pruning Drives not visible for 30+ days are marked Offline, then removed with their metrics Disk resize detection with logging 4. Trend Analysis Runs ordinary least squares (OLS) linear regression on each drive's metric history to calculate: GB/day growth rate and GB/month for display Days-until-full projections (capped at 5 years) R² confidence values Minimum 7 data points required; full confidence at 30+ 5. Status Classification Classifies each drive using a priority-based system (first match wins): Priority Status Condition 1 Critical Usage > 95% OR < 30 days until full 2 Attention 30–90 days until full 3 Growing > 0.1 GB/day growth, > 90 days remaining 4 Stable < 0.1 GB/day growth 5 Declining Negative growth rate 6 Insufficient Data < 7 data points 7 Offline Drive not currently visible Appends (Limited) when fewer than 30 data points are available. 6. Visual HTML Dashboard (single Ninja field) Instead of 16 separate text custom fields, the script writes one WYSIWYG field (storageReport) containing: Status header — Color-coded gradient banner (red/orange/amber/green/blue/gray) showing hostname, overall status, and timestamp SVG line chart — Usage % trends for all drives over the retention window. 
Color-coded lines from a colorblind-friendly palette, Y-axis gridlines at 0/25/50/75/100%, up to 7 X-axis date labels, and an inline legend Summary table — Per-drive row with color swatch matching the chart, drive letter + type tag + volume label, total/used/free sizes, GB/month growth, human-friendly time-to-full (e.g. 45d, 2.1mo, 5yr+), and a pill-style status badge Footer — Script version, drive count, data point count, engine info No more MAX_DATA_DRIVES = 3 limitation — all drives are shown. 7. Alerting Writes Windows Event Log entry 5001 (Warning) when a drive transitions to Critical status. Uses fire-once logic so alerts only fire on state transitions, not every run. Resets when the drive exits Critical. 8. Legacy JSON Migration On first run, auto-detects existing JSON history at C:\ProgramData\NinjaRMM\StorageMetrics\storage_history.json. If the SQLite database is empty, migrates all drives and metric data points, preserving alert-sent flags. Renames the JSON files to .migrated afterward. Zero data loss. 
Robustness All 14 CodeRabbit findings (3 review rounds) plus additional self-review findings have been addressed: Safe timestamp parsing — ConvertTo-SafeDateTime using TryParse throughout; null guard in Remove-StaleDrives prevents crash on malformed last_seen Null-safe query wrappers — All Invoke-SqliteQuery result sets filtered through Where-Object { $null -ne $_ } to prevent @($null) single-element arrays reaching downstream functions Fixed continue in expression — Legacy migration guard clause rewritten from unreliable if/else expression assignment to standard pattern Schema consistency — All timestamp columns including last_seen have DEFAULT values Robust final stats — SELECT COUNT(*) wrapped in try/catch so a locked database doesn't crash script exit Correct exit codes — Drive enumeration failure exits with code 2; PSSQLite/database failures exit with code 1 Log file performance — Append-only with prune cycle only when log exceeds 256 KB Exit codes Code Meaning 0 Success 1 Cannot create storage folder, PSSQLite unavailable, or database init failure 2 Drive enumeration failure Ninja RMM Setup Create one custom field: Name: storageReport Type: WYSIWYG Scope: Device (Role: Server) Database path %PROGRAMDATA%\dtc-inc\rmm\dtc-rmm.db Other scripts can query this directly. Schema is documented in the Initialize-Database function. Future: Centralized API + PostgreSQL The schema is designed for a clean migration: INTEGER PRIMARY KEY AUTOINCREMENT → SERIAL PRIMARY KEY strftime(...) 
defaults → NOW() TEXT timestamps → TIMESTAMPTZ All queries are standard SQL with named parameters — swap the connection layer and adjust date functions --- rmm-ninja/ServerGrowthTracking | 1533 ++++++++++++++++++-------------- 1 file changed, 870 insertions(+), 663 deletions(-) diff --git a/rmm-ninja/ServerGrowthTracking b/rmm-ninja/ServerGrowthTracking index a7c289b..8c69d80 100644 --- a/rmm-ninja/ServerGrowthTracking +++ b/rmm-ninja/ServerGrowthTracking @@ -1,17 +1,24 @@ <# .SYNOPSIS - Storage Growth Monitor - Tracks server storage growth trends over 60 days - and reports to Ninja RMM custom fields. + Storage Growth Monitor v2.0 - SQLite Edition + Tracks server storage growth trends and reports via Ninja RMM. .DESCRIPTION Collects daily storage metrics from servers (physical Hyper-V hosts and VMs), - maintains a 60-day rolling history, calculates growth trends using linear - regression, and updates Ninja RMM custom fields with actionable insights. - Critical trends trigger Ninja event log entries for automated alerting and - Halo PSA ticket creation. + persists to a shared SQLite database at %PROGRAMDATA%\dtc-inc\rmm\dtc-rmm.db, + calculates growth trends using linear regression, and generates a visual HTML + storage report written to a single Ninja RMM WYSIWYG custom field. + + The SQLite database is designed as a shared data store that other scripts and + tools can query. Schema uses PostgreSQL-compatible conventions for future + migration to a centralized API service backed by PostgreSQL. Technical Design Document: Ticket 1123004 - DTC Internal - Version: 2.5 (Final) + Version: 2.0 (SQLite Edition) + + Ninja RMM Setup: + - Create ONE WYSIWYG custom field named "storageReport" + - The script populates it with an HTML dashboard including SVG line chart .PARAMETER Verbose Enable detailed diagnostic output for troubleshooting. 
@@ -31,18 +38,18 @@ param() # ============================================================================ # CONSTANTS # ============================================================================ -$Script:VERSION = "1.0" -$Script:STORAGE_PATH = "C:\ProgramData\NinjaRMM\StorageMetrics" -$Script:HISTORY_FILE = Join-Path $Script:STORAGE_PATH "storage_history.json" -$Script:BACKUP_FILE = Join-Path $Script:STORAGE_PATH "storage_history.json.bak" +$Script:VERSION = "2.0" +$Script:STORAGE_PATH = Join-Path $env:ProgramData "dtc-inc\rmm" +$Script:DB_PATH = Join-Path $Script:STORAGE_PATH "dtc-rmm.db" $Script:LOG_FILE = Join-Path $Script:STORAGE_PATH "storage_monitor.log" $Script:EVENT_SOURCE = "StorageGrowthMonitor" + +# Retention & thresholds $Script:RETENTION_DAYS = 65 $Script:LOG_RETENTION_DAYS = 90 $Script:OFFLINE_REMOVAL_DAYS = 30 $Script:MIN_DATA_POINTS = 7 $Script:FULL_CONFIDENCE_POINTS = 30 -$Script:MAX_DATA_DRIVES = 3 $Script:DAYS_CAP = 1825 $Script:MIN_DRIVE_SIZE_GB = 1 $Script:CRITICAL_DAYS = 30 @@ -52,17 +59,15 @@ $Script:CRITICAL_USAGE_PERCENT = 95 $Script:GROWING_THRESHOLD_GB_DAY = 0.1 $Script:EXCLUDED_LABELS = @("Recovery", "EFI", "System Reserved", "SYSTEM", "Windows RE") $Script:EXCLUDED_FILESYSTEMS = @("FAT", "FAT32", "RAW") -$Script:JSON_VERSION = "1.0" - -# Ninja RMM field name constants (Section 10) -$Script:FIELD_SERVER_STATUS = "Server Storage Status" -$Script:FIELD_OS_STATUS = "OS Drive Status" -$Script:FIELD_OS_GROWTH = "OS Drive GB per Month" -$Script:FIELD_OS_DAYS = "OS Drive Days Until Full" -$Script:FIELD_DATA_LETTER = "Data Drive {0} Letter" -$Script:FIELD_DATA_STATUS = "Data Drive {0} Status" -$Script:FIELD_DATA_GROWTH = "Data Drive {0} GB per Month" -$Script:FIELD_DATA_DAYS = "Data Drive {0} Days Until Full" + +# Ninja RMM field (single WYSIWYG field) +$Script:FIELD_STORAGE_REPORT = "storageReport" + +# Legacy JSON paths (for migration) +$Script:LEGACY_JSON_PATH = "C:\ProgramData\NinjaRMM\StorageMetrics\storage_history.json" + +# Chart color 
palette (colorblind-friendly) +$Script:CHART_COLORS = @('#2563eb', '#16a34a', '#ea580c', '#9333ea', '#0d9488', '#dc2626', '#ca8a04', '#be185d') # ============================================================================ # LOGGING @@ -110,18 +115,16 @@ function Write-VerboseLog { } # ============================================================================ -# LOG FILE MANAGEMENT (Section 14) +# LOG FILE MANAGEMENT # ============================================================================ function Save-LogFile { $Script:LOG_PRUNE_THRESHOLD_KB = 256 try { - # Append new entries (fast path - no read required) if ($Script:LogBuffer.Count -gt 0) { $Script:LogBuffer | Add-Content -Path $Script:LOG_FILE -Encoding UTF8 -ErrorAction Stop } - # Prune only when file exceeds size threshold to avoid read-rewrite on every run $fileInfo = Get-Item $Script:LOG_FILE -ErrorAction SilentlyContinue if ($fileInfo -and ($fileInfo.Length / 1KB) -gt $Script:LOG_PRUNE_THRESHOLD_KB) { $cutoff = (Get-Date).AddDays(-$Script:LOG_RETENTION_DAYS) @@ -144,7 +147,7 @@ function Save-LogFile { if ($removedCount -gt 0) { $prunedLines | Set-Content -Path $Script:LOG_FILE -Encoding UTF8 -ErrorAction Stop - Write-VerboseLog "Log file pruning: $removedCount entries removed (older than $($Script:LOG_RETENTION_DAYS) days)" + Write-VerboseLog "Log pruning: $removedCount entries removed (older than $($Script:LOG_RETENTION_DAYS) days)" } } } @@ -154,13 +157,9 @@ function Save-LogFile { } # ============================================================================ -# SAFE TIMESTAMP PARSING HELPER +# SAFE TIMESTAMP PARSING # ============================================================================ function ConvertTo-SafeDateTime { - <# - .SYNOPSIS - Safely parses a timestamp string, returning $null on failure instead of throwing. 
- #> param([string]$Timestamp) $parsed = $null @@ -171,212 +170,454 @@ function ConvertTo-SafeDateTime { } # ============================================================================ -# JSON PERSISTENCE (Section 6) +# SQLITE MODULE MANAGEMENT # ============================================================================ -function New-EmptyHistory { - return [ordered]@{ - version = $Script:JSON_VERSION - deviceId = $env:COMPUTERNAME - lastUpdated = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") - excessDriveAlertSent = $false - drives = [ordered]@{} - } -} - -function ConvertFrom-HistoryJson { +function Initialize-SQLiteModule { <# .SYNOPSIS - Parses raw JSON content into a history hashtable. Returns $null on failure. + Ensures the PSSQLite module is available. Installs from PSGallery if needed. #> - param([string]$JsonContent, [string]$SourceLabel = "unknown") - try { - $data = $JsonContent | ConvertFrom-Json -ErrorAction Stop + # Check if already available + if (Get-Module -Name PSSQLite -ErrorAction SilentlyContinue) { + Write-VerboseLog "PSSQLite module: Already loaded" + return $true + } - if (-not $data.version -or -not $data.drives) { - throw "Invalid JSON structure - missing version or drives" + if (Get-Module -ListAvailable -Name PSSQLite -ErrorAction SilentlyContinue) { + try { + Import-Module PSSQLite -ErrorAction Stop + Write-VerboseLog "PSSQLite module: Imported successfully" + return $true } - - $history = [ordered]@{ - version = $data.version - deviceId = if ($data.deviceId) { $data.deviceId } else { $env:COMPUTERNAME } - lastUpdated = if ($data.lastUpdated) { $data.lastUpdated } else { (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") } - excessDriveAlertSent = if ($null -ne $data.excessDriveAlertSent) { [bool]$data.excessDriveAlertSent } else { $false } - drives = [ordered]@{} + catch { + Write-Log "WARNING: PSSQLite found but failed to import: $_" } + } - foreach ($prop in $data.drives.PSObject.Properties) { - $driveLetter = $prop.Name - $driveData = 
$prop.Value - - $historyEntries = [System.Collections.ArrayList]::new() - if ($driveData.history) { - foreach ($entry in $driveData.history) { - [void]$historyEntries.Add([ordered]@{ - timestamp = $entry.timestamp - usedGB = [double]$entry.usedGB - freeGB = [double]$entry.freeGB - usagePercent = [double]$entry.usagePercent - }) - } - } + # Install from PSGallery + Write-Log "PSSQLite module not found - installing from PSGallery..." + try { + [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 - $history.drives[$driveLetter] = [ordered]@{ - volumeLabel = if ($driveData.volumeLabel) { $driveData.volumeLabel } else { "" } - totalSizeGB = [double]$driveData.totalSizeGB - driveType = if ($driveData.driveType) { $driveData.driveType } else { "Data" } - alertSent = if ($null -ne $driveData.alertSent) { [bool]$driveData.alertSent } else { $false } - status = if ($driveData.status) { $driveData.status } else { "Online" } - lastSeen = if ($driveData.lastSeen) { $driveData.lastSeen } else { (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") } - history = $historyEntries - } + # Ensure NuGet provider is available + $nuget = Get-PackageProvider -Name NuGet -ErrorAction SilentlyContinue + if (-not $nuget -or $nuget.Version -lt [Version]"2.8.5.201") { + Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force -ErrorAction Stop | Out-Null } - return $history + Install-Module -Name PSSQLite -Force -Scope AllUsers -AllowClobber -ErrorAction Stop + Import-Module PSSQLite -ErrorAction Stop + Write-Log ([char]0x2713 + " PSSQLite module installed and loaded") + return $true } catch { - Write-VerboseLog "ConvertFrom-HistoryJson: Failed to parse content from $SourceLabel`: $_" - return $null + Write-Log "ERROR: Cannot install PSSQLite module: $_" + Write-Log "Install manually: Install-Module -Name PSSQLite -Force -Scope AllUsers" + return $false } } -function Import-HistoryFromFile { +# 
============================================================================ +# DATABASE SCHEMA & INITIALIZATION +# ============================================================================ +function Initialize-Database { <# .SYNOPSIS - Reads a history JSON file from disk and parses it. Returns $null on failure. + Creates the SQLite database and tables if they don't exist. + Schema uses PostgreSQL-compatible conventions for future migration. #> - param([string]$FilePath) - - if (-not (Test-Path $FilePath)) { return $null } try { - $content = Get-Content -Path $FilePath -Raw -Encoding UTF8 -ErrorAction Stop - return ConvertFrom-HistoryJson -JsonContent $content -SourceLabel $FilePath + # Enable WAL mode for better concurrent access + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "PRAGMA journal_mode=WAL" -ErrorAction Stop | Out-Null + + # Device table - one row per monitored machine + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + CREATE TABLE IF NOT EXISTS device ( + device_id TEXT PRIMARY KEY, + hostname TEXT NOT NULL, + os_version TEXT, + created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%S', 'now', 'localtime')), + updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%S', 'now', 'localtime')) + ) +"@ -ErrorAction Stop + + # Drive table - one row per drive per device + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + CREATE TABLE IF NOT EXISTS drive ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + device_id TEXT NOT NULL REFERENCES device(device_id), + drive_letter TEXT NOT NULL, + volume_label TEXT DEFAULT '', + total_size_gb REAL NOT NULL, + drive_type TEXT NOT NULL CHECK(drive_type IN ('OS', 'Data')), + status TEXT NOT NULL DEFAULT 'Online' CHECK(status IN ('Online', 'Offline')), + last_seen TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%S', 'now', 'localtime')), + alert_sent INTEGER NOT NULL DEFAULT 0, + created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%S', 'now', 'localtime')), + UNIQUE(device_id, 
drive_letter) + ) +"@ -ErrorAction Stop + + # Metric table - time-series storage data points + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + CREATE TABLE IF NOT EXISTS metric ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + drive_id INTEGER NOT NULL REFERENCES drive(id), + timestamp TEXT NOT NULL, + used_gb REAL NOT NULL, + free_gb REAL NOT NULL, + usage_percent REAL NOT NULL + ) +"@ -ErrorAction Stop + + # Alert state table - fire-once tracking per device + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + CREATE TABLE IF NOT EXISTS alert_state ( + device_id TEXT NOT NULL REFERENCES device(device_id), + alert_type TEXT NOT NULL, + is_active INTEGER NOT NULL DEFAULT 0, + last_triggered TEXT, + PRIMARY KEY (device_id, alert_type) + ) +"@ -ErrorAction Stop + + # Indexes for efficient queries + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "CREATE INDEX IF NOT EXISTS idx_metric_drive_ts ON metric(drive_id, timestamp)" -ErrorAction Stop + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "CREATE INDEX IF NOT EXISTS idx_drive_device ON drive(device_id)" -ErrorAction Stop + + Write-VerboseLog "Database initialized: $($Script:DB_PATH)" + return $true } catch { - Write-VerboseLog "Import-HistoryFromFile: Failed to read '$FilePath': $_" - return $null + Write-Log "ERROR: Failed to initialize database: $_" + return $false } } -function Load-History { - if (-not (Test-Path $Script:HISTORY_FILE)) { - Write-VerboseLog "Existing JSON: No - creating new history" - return New-EmptyHistory - } +# ============================================================================ +# DATABASE OPERATIONS (CRUD) +# ============================================================================ +function Get-DeviceRecord { + <# + .SYNOPSIS + Gets or creates the device record for the current machine. 
+ #> + param([string]$DeviceId, [string]$Hostname) - $fileInfo = Get-Item $Script:HISTORY_FILE -ErrorAction SilentlyContinue - Write-VerboseLog "Existing JSON: Yes ($([math]::Round($fileInfo.Length / 1KB)) KB)" + $now = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") + $osVersion = "" + try { + $os = Get-CimInstance Win32_OperatingSystem -ErrorAction SilentlyContinue + if ($os) { $osVersion = "$($os.Caption) $($os.Version)" } + } + catch { } - # Attempt to read and parse, retrying only on I/O errors - $maxRetries = 3 - $retryDelay = 5 - $content = $null + $existing = Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "SELECT device_id FROM device WHERE device_id = @id" -SqlParameters @{ id = $DeviceId } - for ($attempt = 1; $attempt -le $maxRetries; $attempt++) { - try { - $content = Get-Content -Path $Script:HISTORY_FILE -Raw -Encoding UTF8 -ErrorAction Stop - break # I/O succeeded, move on to parsing + if ($existing) { + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "UPDATE device SET hostname = @hostname, os_version = @os, updated_at = @now WHERE device_id = @id" -SqlParameters @{ + id = $DeviceId + hostname = $Hostname + os = $osVersion + now = $now } - catch { - Write-Log "WARNING: Failed to read history file (attempt $attempt/$maxRetries): $_" - if ($attempt -lt $maxRetries) { - Start-Sleep -Seconds $retryDelay - } + } + else { + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "INSERT INTO device (device_id, hostname, os_version, created_at, updated_at) VALUES (@id, @hostname, @os, @now, @now)" -SqlParameters @{ + id = $DeviceId + hostname = $Hostname + os = $osVersion + now = $now } } +} - # If I/O succeeded, attempt parse (no retry - parse errors are not transient) - if ($null -ne $content) { - $result = ConvertFrom-HistoryJson -JsonContent $content -SourceLabel $Script:HISTORY_FILE - if ($null -ne $result) { - return $result +function Get-DriveRecord { + <# + .SYNOPSIS + Gets a drive record by device_id and drive_letter. 
+ #> + param([string]$DeviceId, [string]$DriveLetter) + + return Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "SELECT * FROM drive WHERE device_id = @did AND drive_letter = @letter" -SqlParameters @{ + did = $DeviceId + letter = $DriveLetter + } +} + +function Save-DriveRecord { + <# + .SYNOPSIS + Upserts a drive record (insert or update). + #> + param( + [string]$DeviceId, + [string]$DriveLetter, + [string]$VolumeLabel, + [double]$TotalSizeGB, + [string]$DriveType, + [string]$Status = "Online" + ) + + $now = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") + $existing = Get-DriveRecord -DeviceId $DeviceId -DriveLetter $DriveLetter + + if ($existing) { + # Detect disk resize + if ([math]::Abs($existing.total_size_gb - $TotalSizeGB) -gt 0.01) { + Write-Log "Drive ${DriveLetter}: disk size changed from $($existing.total_size_gb) GB to $TotalSizeGB GB" + } + + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + UPDATE drive SET volume_label = @label, total_size_gb = @size, drive_type = @type, + status = @status, last_seen = @now + WHERE device_id = @did AND drive_letter = @letter +"@ -SqlParameters @{ + did = $DeviceId + letter = $DriveLetter + label = $VolumeLabel + size = $TotalSizeGB + type = $DriveType + status = $Status + now = $now } - Write-Log "WARNING: Primary history file failed to parse." + return $existing.id } else { - Write-Log "WARNING: Could not read primary history file after $maxRetries attempts." 
+ Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + INSERT INTO drive (device_id, drive_letter, volume_label, total_size_gb, drive_type, status, last_seen, created_at) + VALUES (@did, @letter, @label, @size, @type, @status, @now, @now) +"@ -SqlParameters @{ + did = $DeviceId + letter = $DriveLetter + label = $VolumeLabel + size = $TotalSizeGB + type = $DriveType + status = $Status + now = $now + } + + $newDrive = Get-DriveRecord -DeviceId $DeviceId -DriveLetter $DriveLetter + return $newDrive.id } +} - # Primary file failed - attempt backup recovery - if (Test-Path $Script:BACKUP_FILE) { - Write-Log "WARNING: Attempting backup recovery..." - $backupResult = Import-HistoryFromFile -FilePath $Script:BACKUP_FILE - if ($null -ne $backupResult) { - Write-Log "Backup recovery successful - restored from $($Script:BACKUP_FILE)" - return $backupResult - } - Write-Log "WARNING: Backup file also corrupted." +function Add-MetricRecord { + <# + .SYNOPSIS + Inserts a new metric data point for a drive. + #> + param( + [int]$DriveId, + [string]$Timestamp, + [double]$UsedGB, + [double]$FreeGB, + [double]$UsagePercent + ) + + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + INSERT INTO metric (drive_id, timestamp, used_gb, free_gb, usage_percent) + VALUES (@driveId, @ts, @used, @free, @pct) +"@ -SqlParameters @{ + driveId = $DriveId + ts = $Timestamp + used = $UsedGB + free = $FreeGB + pct = $UsagePercent } +} - # Both primary and backup failed - rename corrupted file and start fresh - Write-Log "WARNING: History unrecoverable. Renaming and starting fresh." - $corruptedPath = $Script:HISTORY_FILE + ".corrupted" - try { - Move-Item -Path $Script:HISTORY_FILE -Destination $corruptedPath -Force -ErrorAction Stop +function Get-DriveMetrics { + <# + .SYNOPSIS + Gets metric history for a drive within the retention window. 
+ Returns objects with: timestamp, usedGB, freeGB, usagePercent + #> + param([int]$DriveId) + + $cutoff = (Get-Date).AddDays(-$Script:RETENTION_DAYS).ToString("yyyy-MM-ddTHH:mm:ss") + + $results = Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + SELECT timestamp, used_gb AS usedGB, free_gb AS freeGB, usage_percent AS usagePercent + FROM metric + WHERE drive_id = @driveId AND timestamp >= @cutoff + ORDER BY timestamp +"@ -SqlParameters @{ + driveId = $DriveId + cutoff = $cutoff } - catch { - Write-Log "WARNING: Could not rename corrupted file: $_" + if ($null -eq $results) { return @() } + return @($results | Where-Object { $null -ne $_ }) +} + +function Get-AllDeviceDrives { + <# + .SYNOPSIS + Gets all drive records for a device. + #> + param([string]$DeviceId) + + $results = Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "SELECT * FROM drive WHERE device_id = @did ORDER BY drive_letter" -SqlParameters @{ + did = $DeviceId } - return New-EmptyHistory + if ($null -eq $results) { return @() } + return @($results | Where-Object { $null -ne $_ }) } -function Save-History { - param([hashtable]$History) +function Set-DriveAlertSent { + param([int]$DriveId, [bool]$AlertSent) - # Backup existing file before overwriting - if (Test-Path $Script:HISTORY_FILE) { - try { - Copy-Item -Path $Script:HISTORY_FILE -Destination $Script:BACKUP_FILE -Force -ErrorAction Stop - } - catch { - Write-Log "WARNING: Could not create backup: $_" - } + $val = if ($AlertSent) { 1 } else { 0 } + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "UPDATE drive SET alert_sent = @val WHERE id = @id" -SqlParameters @{ + id = $DriveId + val = $val + } +} + +function Set-DriveOffline { + param([int]$DriveId) + + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "UPDATE drive SET status = 'Offline' WHERE id = @id" -SqlParameters @{ + id = $DriveId } +} - $History.lastUpdated = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") +function Remove-StaleDrives { + <# + .SYNOPSIS + Removes 
drives that have been offline longer than the threshold, along with their metrics. + #> + param([string]$DeviceId) - # Atomic write: write to temp file first, then rename to prevent corruption - $tempFile = $Script:HISTORY_FILE + ".tmp" + $cutoff = (Get-Date).AddDays(-$Script:OFFLINE_REMOVAL_DAYS).ToString("yyyy-MM-ddTHH:mm:ss") + + $rawResults = Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + SELECT id, drive_letter, last_seen FROM drive + WHERE device_id = @did AND status = 'Offline' AND last_seen < @cutoff +"@ -SqlParameters @{ + did = $DeviceId + cutoff = $cutoff + } + $staleDrives = if ($null -eq $rawResults) { @() } else { @($rawResults | Where-Object { $null -ne $_ }) } + + foreach ($drive in $staleDrives) { + $parsedLastSeen = ConvertTo-SafeDateTime $drive.last_seen + if ($null -eq $parsedLastSeen) { $parsedLastSeen = Get-Date } + $daysOffline = [math]::Round(((Get-Date) - $parsedLastSeen).TotalDays, 0) + Write-Log "Drive $($drive.drive_letter): Offline for $daysOffline days - removing from database" + + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "DELETE FROM metric WHERE drive_id = @id" -SqlParameters @{ id = $drive.id } + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "DELETE FROM drive WHERE id = @id" -SqlParameters @{ id = $drive.id } + } + + return $staleDrives.Count +} + +function Remove-OldMetrics { + <# + .SYNOPSIS + Prunes metric data points older than the retention window. 
+ #> + + $cutoff = (Get-Date).AddDays(-$Script:RETENTION_DAYS).ToString("yyyy-MM-ddTHH:mm:ss") + + $result = Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "SELECT COUNT(*) AS cnt FROM metric WHERE timestamp < @cutoff" -SqlParameters @{ cutoff = $cutoff } + $count = if ($result) { $result.cnt } else { 0 } + + if ($count -gt 0) { + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "DELETE FROM metric WHERE timestamp < @cutoff" -SqlParameters @{ cutoff = $cutoff } + Write-VerboseLog "Pruned $count metric records older than $($Script:RETENTION_DAYS) days" + } +} + +# ============================================================================ +# LEGACY JSON MIGRATION (one-time) +# ============================================================================ +function Import-LegacyJsonHistory { + <# + .SYNOPSIS + Migrates existing JSON history data to the SQLite database. + Runs once, then renames the JSON file to .migrated. + #> + param([string]$DeviceId) + + if (-not (Test-Path $Script:LEGACY_JSON_PATH)) { return $false } + + Write-Log "Found legacy JSON history - migrating to SQLite..." 
try { - $jsonOutput = $History | ConvertTo-Json -Depth 10 - $jsonOutput | Set-Content -Path $tempFile -Encoding UTF8 -ErrorAction Stop + $content = Get-Content -Path $Script:LEGACY_JSON_PATH -Raw -Encoding UTF8 -ErrorAction Stop + $data = $content | ConvertFrom-Json -ErrorAction Stop - try { - Move-Item -Path $tempFile -Destination $Script:HISTORY_FILE -Force -ErrorAction Stop + if (-not $data.drives) { + Write-Log "WARNING: Legacy JSON has no drives section, skipping migration" + return $false } - catch { - # Move failed - fall back to direct write so new data isn't lost - Write-VerboseLog "Atomic rename failed, falling back to direct write: $_" - $jsonOutput | Set-Content -Path $Script:HISTORY_FILE -Encoding UTF8 -ErrorAction Stop - if (Test-Path $tempFile) { - Remove-Item -Path $tempFile -Force -ErrorAction SilentlyContinue + + $migratedDrives = 0 + $migratedPoints = 0 + + foreach ($prop in $data.drives.PSObject.Properties) { + $letter = $prop.Name + $driveData = $prop.Value + + $driveType = if ($driveData.driveType) { $driveData.driveType } else { "Data" } + $volumeLabel = if ($driveData.volumeLabel) { $driveData.volumeLabel } else { "" } + $totalSize = if ($driveData.totalSizeGB) { [double]$driveData.totalSizeGB } else { 0 } + $status = if ($driveData.status) { $driveData.status } else { "Online" } + $lastSeen = if ($driveData.lastSeen) { $driveData.lastSeen } else { (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") } + $alertSent = if ($driveData.alertSent) { [bool]$driveData.alertSent } else { $false } + + # Create drive record + $driveId = Save-DriveRecord -DeviceId $DeviceId -DriveLetter $letter -VolumeLabel $volumeLabel ` + -TotalSizeGB $totalSize -DriveType $driveType -Status $status + + if ($alertSent) { + Set-DriveAlertSent -DriveId $driveId -AlertSent $true + } + + # Migrate history data points + if ($driveData.history) { + foreach ($entry in $driveData.history) { + if (-not $entry.timestamp) { continue } + $ts = $entry.timestamp + Add-MetricRecord -DriveId 
$driveId -Timestamp $ts ` + -UsedGB ([double]$entry.usedGB) -FreeGB ([double]$entry.freeGB) ` + -UsagePercent ([double]$entry.usagePercent) + $migratedPoints++ + } } + + $migratedDrives++ } - $totalPoints = 0 - $driveCount = 0 - foreach ($drive in $History.drives.Values) { - $driveCount++ - $totalPoints += $drive.history.Count + # Rename the old JSON file + $migratedPath = $Script:LEGACY_JSON_PATH + ".migrated" + Move-Item -Path $Script:LEGACY_JSON_PATH -Destination $migratedPath -Force -ErrorAction SilentlyContinue + + # Also rename backup if it exists + $backupPath = $Script:LEGACY_JSON_PATH + ".bak" + if (Test-Path $backupPath) { + Move-Item -Path $backupPath -Destination ($backupPath + ".migrated") -Force -ErrorAction SilentlyContinue } - Write-Log ([char]0x2713 + " History file saved ($driveCount drives, $totalPoints data points)") + + Write-Log ([char]0x2713 + " Migration complete: $migratedDrives drives, $migratedPoints data points") + return $true } catch { - Write-Log "ERROR: Failed to save history file: $_" - # Clean up temp file if it exists - if (Test-Path $tempFile) { - Remove-Item -Path $tempFile -Force -ErrorAction SilentlyContinue - } + Write-Log "WARNING: JSON migration failed: $_" + Write-Log "Legacy data preserved at $($Script:LEGACY_JSON_PATH)" + return $false } } # ============================================================================ -# DRIVE DISCOVERY & FILTERING (Section 5) +# DRIVE DISCOVERY & FILTERING # ============================================================================ function Get-FilteredDrives { - # Detect OS drive (Section 5.3) + # Detect OS drive $osDriveLetter = (Get-CimInstance Win32_OperatingSystem -ErrorAction SilentlyContinue).SystemDrive if (-not $osDriveLetter) { Write-Log "WARNING: Could not detect OS drive, falling back to C:" @@ -384,11 +625,11 @@ function Get-FilteredDrives { } Write-VerboseLog "OS Drive detected: $osDriveLetter" - # Primary source - Win32_LogicalDisk (Section 5.1) + # Primary source - 
Win32_LogicalDisk $logicalDisks = @(Get-CimInstance -ClassName Win32_LogicalDisk -Filter "DriveType=3" -ErrorAction Stop) Write-VerboseLog "Drive Discovery: $($logicalDisks.Count) drives found" - # Secondary source - Win32_Volume for filtering metadata only (Section 5.1) + # Secondary source - Win32_Volume for filtering metadata $volumes = $null try { $volumes = @(Get-CimInstance -ClassName Win32_Volume -ErrorAction Stop) @@ -396,47 +637,37 @@ function Get-FilteredDrives { } catch { Write-Log "WARNING: Win32_Volume query failed - continuing with LogicalDisk only" - Write-VerboseLog "Win32_Volume query: Failed - $_" } $filteredDrives = [System.Collections.ArrayList]::new() foreach ($disk in $logicalDisks) { - $letter = $disk.DeviceID # e.g., "C:" + $letter = $disk.DeviceID $sizeGB = [math]::Round($disk.Size / 1GB, 3) $label = $disk.VolumeName - # Find matching volume for additional metadata $matchingVolume = $null if ($volumes) { - $matchingVolume = $volumes | Where-Object { - $_.DriveLetter -eq $letter - } | Select-Object -First 1 + $matchingVolume = $volumes | Where-Object { $_.DriveLetter -eq $letter } | Select-Object -First 1 } - $fileSystem = if ($matchingVolume -and $matchingVolume.FileSystem) { - $matchingVolume.FileSystem - } else { "" } + $fileSystem = if ($matchingVolume -and $matchingVolume.FileSystem) { $matchingVolume.FileSystem } else { "" } - # Use volume label from Volume if LogicalDisk doesn't have one if (-not $label -and $matchingVolume -and $matchingVolume.Label) { $label = $matchingVolume.Label } - # --- Exclusion checks (Section 5.2) --- - - # Size < 1 GB + # Exclusion checks if ($sizeGB -lt $Script:MIN_DRIVE_SIZE_GB) { - Write-VerboseLog " $letter DriveType=3 Size=${sizeGB}GB - EXCLUDED (Size < 1GB)" + Write-VerboseLog " $letter Size=${sizeGB}GB - EXCLUDED (< 1GB)" continue } - # Excluded volume labels $labelExcluded = $false if ($label) { foreach ($excludedLabel in $Script:EXCLUDED_LABELS) { if ($label -ieq $excludedLabel) { - 
Write-VerboseLog " $letter DriveType=3 Size=${sizeGB}GB Label=`"$label`" - EXCLUDED ($excludedLabel partition)" + Write-VerboseLog " $letter Label=`"$label`" - EXCLUDED ($excludedLabel)" $labelExcluded = $true break } @@ -444,26 +675,21 @@ function Get-FilteredDrives { } if ($labelExcluded) { continue } - # Excluded file systems (FAT, FAT32, RAW) if ($fileSystem -and $Script:EXCLUDED_FILESYSTEMS -contains $fileSystem) { - Write-VerboseLog " $letter DriveType=3 Size=${sizeGB}GB - EXCLUDED (FileSystem: $fileSystem)" + Write-VerboseLog " $letter - EXCLUDED (FileSystem: $fileSystem)" continue } - # Drive passed all filters - classify and collect metrics + # Drive passed all filters $isOS = ($letter -eq $osDriveLetter) $driveType = if ($isOS) { "OS" } else { "Data" } - Write-VerboseLog " $letter DriveType=3 Size=${sizeGB}GB - INCLUDED ($driveType)" - $usedBytes = $disk.Size - $disk.FreeSpace $usedGB = [math]::Round($usedBytes / 1GB, 3) $freeGB = [math]::Round($disk.FreeSpace / 1GB, 3) - $usagePercent = if ($disk.Size -gt 0) { - [math]::Round(($usedBytes / $disk.Size) * 100, 2) - } else { 0 } + $usagePercent = if ($disk.Size -gt 0) { [math]::Round(($usedBytes / $disk.Size) * 100, 2) } else { 0 } - Write-VerboseLog "Drive $letter Raw values - Total: $($sizeGB.ToString('F3')) Used: $($usedGB.ToString('F3')) Free: $($freeGB.ToString('F3')) Percent: $($usagePercent.ToString('F3'))%" + Write-VerboseLog " $letter Size=${sizeGB}GB - INCLUDED ($driveType)" [void]$filteredDrives.Add(@{ Letter = $letter @@ -481,149 +707,22 @@ function Get-FilteredDrives { } # ============================================================================ -# HISTORY UPDATE & PRUNING (Section 6) -# ============================================================================ -function Update-History { - param( - [hashtable]$History, - [array]$CurrentDrives - ) - - $now = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") - $cutoffDate = (Get-Date).AddDays(-$Script:RETENTION_DAYS) - $visibleLetters = 
@($CurrentDrives | ForEach-Object { $_.Letter }) - - # Update/add visible drives - foreach ($drive in $CurrentDrives) { - $letter = $drive.Letter - - if (-not $History.drives.ContainsKey($letter)) { - # New drive - create entry - $History.drives[$letter] = [ordered]@{ - volumeLabel = $drive.VolumeLabel - totalSizeGB = $drive.TotalSizeGB - driveType = $drive.DriveType - alertSent = $false - status = "Online" - lastSeen = $now - history = [System.Collections.ArrayList]::new() - } - } - else { - # Existing drive - update metadata - $existingDrive = $History.drives[$letter] - $existingDrive.status = "Online" - $existingDrive.lastSeen = $now - $existingDrive.driveType = $drive.DriveType - $existingDrive.volumeLabel = $drive.VolumeLabel - - # Detect disk resize (Section 5.5) - if ($existingDrive.totalSizeGB -ne $drive.TotalSizeGB) { - Write-Log "Drive ${letter}: disk size changed from $($existingDrive.totalSizeGB) GB to $($drive.TotalSizeGB) GB" - Write-VerboseLog "Drive ${letter}: Size change detected - Old: $($existingDrive.totalSizeGB) GB, New: $($drive.TotalSizeGB) GB" - $existingDrive.totalSizeGB = $drive.TotalSizeGB - } - } - - # Append new data point - [void]$History.drives[$letter].history.Add([ordered]@{ - timestamp = $now - usedGB = $drive.UsedGB - freeGB = $drive.FreeGB - usagePercent = $drive.UsagePercent - }) - - # Prune entries older than 65-day retention window (defensive parsing) - $driveHistory = $History.drives[$letter].history - $beforeCount = $driveHistory.Count - $prunedHistory = [System.Collections.ArrayList]::new() - foreach ($entry in $driveHistory) { - $entryDate = ConvertTo-SafeDateTime -Timestamp $entry.timestamp - if ($null -eq $entryDate) { - Write-VerboseLog "Drive ${letter}: Skipping entry with unparseable timestamp: $($entry.timestamp)" - continue - } - if ($entryDate -ge $cutoffDate) { - [void]$prunedHistory.Add($entry) - } - } - $History.drives[$letter].history = $prunedHistory - $afterCount = $prunedHistory.Count - - if ($beforeCount 
-ne $afterCount) { - Write-VerboseLog "Drive ${letter}: Pruned $($beforeCount - $afterCount) entries older than $($Script:RETENTION_DAYS) days" - } - - if ($prunedHistory.Count -gt 0) { - $oldestTs = $prunedHistory[0].timestamp - $newestTs = $prunedHistory[$prunedHistory.Count - 1].timestamp - $oldest = if ($oldestTs.Length -ge 10) { $oldestTs.Substring(0, 10) } else { $oldestTs } - $newest = if ($newestTs.Length -ge 10) { $newestTs.Substring(0, 10) } else { $newestTs } - Write-VerboseLog "Drive ${letter}: History - $($prunedHistory.Count) points loaded, $afterCount after pruning (oldest: $oldest, newest: $newest)" - } - } - - # Handle drives in history that are NOT currently visible (Section 7) - $drivesToRemove = [System.Collections.ArrayList]::new() - - foreach ($letter in @($History.drives.Keys)) { - if ($letter -notin $visibleLetters) { - $driveData = $History.drives[$letter] - - if ($driveData.status -ne "Offline") { - Write-VerboseLog "Drive ${letter}: No longer visible - marking Offline" - $driveData.status = "Offline" - } - - # Remove if offline > 30 days (Section 7.1) - defensive parsing - if ($driveData.lastSeen) { - $lastSeenDate = ConvertTo-SafeDateTime -Timestamp $driveData.lastSeen - if ($null -ne $lastSeenDate) { - $daysOffline = ((Get-Date) - $lastSeenDate).TotalDays - if ($daysOffline -gt $Script:OFFLINE_REMOVAL_DAYS) { - Write-Log "Drive ${letter}: Offline for $([math]::Round($daysOffline, 0)) days - removing from history" - [void]$drivesToRemove.Add($letter) - } - } - else { - Write-VerboseLog "Drive ${letter}: Unparseable lastSeen timestamp: $($driveData.lastSeen)" - } - } - } - } - - foreach ($letter in $drivesToRemove) { - $History.drives.Remove($letter) - } - - return $History -} - -# ============================================================================ -# LINEAR REGRESSION (Section 8) +# LINEAR REGRESSION # ============================================================================ function Get-LinearRegression { - param( - 
[System.Collections.ArrayList]$HistoryData - ) + param([array]$HistoryData) $n = $HistoryData.Count if ($n -lt 2) { return @{ Slope = 0; Intercept = 0; RSquared = 0 } } - # Convert timestamps to days from first measurement (Section 8.1) - defensive parsing $firstTimestamp = ConvertTo-SafeDateTime -Timestamp $HistoryData[0].timestamp if ($null -eq $firstTimestamp) { - Write-VerboseLog "Linear regression: Cannot parse first timestamp, returning zero slope" return @{ Slope = 0; Intercept = 0; RSquared = 0 } } - $sumX = 0.0 - $sumY = 0.0 - $sumXY = 0.0 - $sumX2 = 0.0 - $sumY2 = 0.0 + $sumX = 0.0; $sumY = 0.0; $sumXY = 0.0; $sumX2 = 0.0; $sumY2 = 0.0 $validPoints = 0 foreach ($point in $HistoryData) { @@ -633,11 +732,7 @@ function Get-LinearRegression { $x = ($pointDate - $firstTimestamp).TotalDays $y = [double]$point.usedGB - $sumX += $x - $sumY += $y - $sumXY += ($x * $y) - $sumX2 += ($x * $x) - $sumY2 += ($y * $y) + $sumX += $x; $sumY += $y; $sumXY += ($x * $y); $sumX2 += ($x * $x); $sumY2 += ($y * $y) $validPoints++ } @@ -645,7 +740,6 @@ function Get-LinearRegression { return @{ Slope = 0; Intercept = 0; RSquared = 0 } } - # OLS formula (Section 8.3) $denominator = ($validPoints * $sumX2) - ($sumX * $sumX) if ([math]::Abs($denominator) -lt 1e-10) { return @{ Slope = 0; Intercept = $sumY / $validPoints; RSquared = 0 } @@ -654,14 +748,12 @@ function Get-LinearRegression { $slope = (($validPoints * $sumXY) - ($sumX * $sumY)) / $denominator $intercept = ($sumY - ($slope * $sumX)) / $validPoints - # Calculate R-squared for trend confidence $meanY = $sumY / $validPoints $ssTot = $sumY2 - ($validPoints * $meanY * $meanY) $ssRes = 0.0 foreach ($point in $HistoryData) { $pointDate = ConvertTo-SafeDateTime -Timestamp $point.timestamp if ($null -eq $pointDate) { continue } - $x = ($pointDate - $firstTimestamp).TotalDays $y = [double]$point.usedGB $predicted = $slope * $x + $intercept @@ -670,142 +762,132 @@ function Get-LinearRegression { $rSquared = if ($ssTot -gt 0) { 1 - 
($ssRes / $ssTot) } else { 0 } return @{ - Slope = $slope # GB per day + Slope = $slope Intercept = $intercept RSquared = [math]::Round($rSquared, 2) } } # ============================================================================ -# TREND CALCULATION & STATUS CLASSIFICATION (Sections 8, 9) +# TREND ANALYSIS & STATUS CLASSIFICATION # ============================================================================ function Get-DriveAnalysis { + <# + .SYNOPSIS + Analyzes a drive's metrics and classifies its status. + #> param( - [hashtable]$DriveData, - [string]$DriveLetter + [object]$DriveRecord, + [array]$Metrics ) $result = @{ - Letter = $DriveLetter - VolumeLabel = $DriveData.volumeLabel - TotalSizeGB = $DriveData.totalSizeGB - DriveType = $DriveData.driveType + Letter = $DriveRecord.drive_letter + VolumeLabel = $DriveRecord.volume_label + TotalSizeGB = $DriveRecord.total_size_gb + DriveType = $DriveRecord.drive_type + DriveId = $DriveRecord.id Status = "" GBPerMonth = "" DaysUntilFull = "" - AlertSent = $DriveData.alertSent - DriveStatus = $DriveData.status + AlertSent = [bool]$DriveRecord.alert_sent + DriveStatus = $DriveRecord.status IsLimited = $false NumericDays = $null RawGrowthPerDay = 0 CurrentUsedGB = 0 CurrentFreeGB = 0 CurrentPercent = 0 + Metrics = $Metrics + DataPoints = $Metrics.Count } - # Handle offline drives (Section 7.2) - if ($DriveData.status -eq "Offline") { + # Handle offline drives + if ($DriveRecord.status -eq "Offline") { $result.Status = "Offline" $result.GBPerMonth = "OFFLINE" $result.DaysUntilFull = "OFFLINE" return $result } - $pointCount = $DriveData.history.Count + $pointCount = $Metrics.Count - # Populate current metrics from latest data point if available + # Current metrics from latest data point if ($pointCount -gt 0) { - $latest = $DriveData.history[$pointCount - 1] + $latest = $Metrics[$pointCount - 1] $result.CurrentUsedGB = [double]$latest.usedGB $result.CurrentFreeGB = [double]$latest.freeGB $result.CurrentPercent = 
[double]$latest.usagePercent } - # Check minimum data points (Section 8.2) + # Minimum data points check if ($pointCount -lt $Script:MIN_DATA_POINTS) { $result.Status = "Insufficient Data" $result.GBPerMonth = "Insufficient Data" $result.DaysUntilFull = "Insufficient Data" - Write-VerboseLog "Drive ${DriveLetter}: $pointCount data points - Insufficient Data" + Write-VerboseLog "Drive $($DriveRecord.drive_letter): $pointCount data points - Insufficient Data" return $result } $isLimited = $pointCount -lt $Script:FULL_CONFIDENCE_POINTS $result.IsLimited = $isLimited - # Run linear regression (Section 8.1) - $regression = Get-LinearRegression -HistoryData $DriveData.history + # Linear regression + $regression = Get-LinearRegression -HistoryData $Metrics $dailyGrowth = $regression.Slope $monthlyGrowth = [math]::Round($dailyGrowth * 30, 3) $result.RawGrowthPerDay = $dailyGrowth - Write-VerboseLog "Drive ${DriveLetter}: Regression - slope=$([math]::Round($dailyGrowth, 4)) GB/day, R`u{00B2}=$($regression.RSquared)" + Write-VerboseLog "Drive $($DriveRecord.drive_letter): slope=$([math]::Round($dailyGrowth, 4)) GB/day, R$([char]0x00B2)=$($regression.RSquared)" $result.GBPerMonth = $monthlyGrowth.ToString("F3") - # Get current free space for days-until-full calculation $currentFreeGB = $result.CurrentFreeGB $currentUsagePercent = $result.CurrentPercent - # Calculate days until full (Section 8.4, 9.1) + # Days until full if ($dailyGrowth -le 0) { - if ($dailyGrowth -eq 0) { - $result.DaysUntilFull = "No Growth" - } else { - $result.DaysUntilFull = "Declining" - } - } else { + $result.DaysUntilFull = if ($dailyGrowth -eq 0) { "No Growth" } else { "Declining" } + } + else { $daysUntilFull = $currentFreeGB / $dailyGrowth - if ($daysUntilFull -gt $Script:DAYS_CAP) { - $daysUntilFull = $Script:DAYS_CAP - } + if ($daysUntilFull -gt $Script:DAYS_CAP) { $daysUntilFull = $Script:DAYS_CAP } $daysUntilFull = [math]::Round($daysUntilFull, 2) $result.DaysUntilFull = 
$daysUntilFull.ToString("F2") $result.NumericDays = $daysUntilFull } - # Status classification - first match wins (Section 9.2) - # Priority 3: Critical - Days < 30 OR usage > 95% + # Status classification - first match wins $isCritical = $false - if ($currentUsagePercent -gt $Script:CRITICAL_USAGE_PERCENT) { - $isCritical = $true - } - if ($null -ne $result.NumericDays -and $result.NumericDays -lt $Script:CRITICAL_DAYS) { - $isCritical = $true - } + if ($currentUsagePercent -gt $Script:CRITICAL_USAGE_PERCENT) { $isCritical = $true } + if ($null -ne $result.NumericDays -and $result.NumericDays -lt $Script:CRITICAL_DAYS) { $isCritical = $true } if ($isCritical) { $result.Status = "Critical" } - # Priority 4: Attention - Days 30-90 inclusive elseif ($null -ne $result.NumericDays -and $result.NumericDays -ge $Script:ATTENTION_DAYS_LOW -and $result.NumericDays -le $Script:ATTENTION_DAYS_HIGH) { $result.Status = "Attention" } - # Priority 5: Declining - negative growth rate elseif ($dailyGrowth -lt 0) { $result.Status = "Declining" } - # Priority 6: Growing - >= 0.1 GB/day AND days > 90 elseif ($dailyGrowth -ge $Script:GROWING_THRESHOLD_GB_DAY -and ($null -eq $result.NumericDays -or $result.NumericDays -gt $Script:ATTENTION_DAYS_HIGH)) { $result.Status = "Growing" } - # Priority 7: Stable - < 0.1 GB/day OR zero growth else { $result.Status = "Stable" } - # Append (Limited) indicator if 7-30 data points (Section 9.4) if ($isLimited) { $result.Status = "$($result.Status) (Limited)" } - Write-VerboseLog "Drive ${DriveLetter}: Data point count: $pointCount, Status: $($result.Status)" - + Write-VerboseLog "Drive $($DriveRecord.drive_letter): $pointCount points, Status: $($result.Status)" return $result } # ============================================================================ -# PRIORITY RANKING (Section 9.5) +# PRIORITY RANKING # ============================================================================ function Get-StatusSortPriority { param([string]$Status) @@ 
-824,7 +906,6 @@ function Get-StatusSortPriority { function Get-ServerStatusSeverity { param([string]$Status) - # Section 9.7: Critical > Attention > Growing > Stable > Declining > Insufficient Data $baseStatus = $Status -replace '\s*\(Limited\)', '' switch ($baseStatus) { "Critical" { return 6 } @@ -837,35 +918,273 @@ function Get-ServerStatusSeverity { } } -function Sort-DataDrives { +function Sort-DriveAnalyses { param([array]$Analyses) - # Separate online and offline (Section 9.5) $online = @($Analyses | Where-Object { $_.DriveStatus -ne "Offline" }) $offline = @($Analyses | Where-Object { $_.DriveStatus -eq "Offline" }) - # Sort online drives by: status priority, then numeric days-until-full, then letter $sorted = @($online | Sort-Object -Property @( @{ Expression = { Get-StatusSortPriority $_.Status }; Ascending = $true }, - @{ Expression = { - if ($null -ne $_.NumericDays) { $_.NumericDays } - else { [double]::MaxValue } - }; Ascending = $true }, + @{ Expression = { if ($null -ne $_.NumericDays) { $_.NumericDays } else { [double]::MaxValue } }; Ascending = $true }, @{ Expression = { $_.Letter }; Ascending = $true } )) - # Append offline sorted alphabetically $offlineSorted = @($offline | Sort-Object -Property Letter) $result = @() if ($sorted.Count -gt 0) { $result += $sorted } if ($offlineSorted.Count -gt 0) { $result += $offlineSorted } - return $result } # ============================================================================ -# ALERTING (Section 11) +# SVG LINE CHART GENERATION +# ============================================================================ +function Build-SvgChart { + <# + .SYNOPSIS + Generates an inline SVG line chart of storage usage trends for all drives. 
+ #> + param([array]$DriveAnalyses) + + $chartWidth = 680 + $chartHeight = 240 + $mLeft = 52 + $mRight = 12 + $mTop = 8 + $mBottom = 40 + $plotW = $chartWidth - $mLeft - $mRight + $plotH = $chartHeight - $mTop - $mBottom + + $sb = [System.Text.StringBuilder]::new() + [void]$sb.Append("") + + # Background + [void]$sb.Append("") + + # Collect all dates for global range + $allDates = [System.Collections.ArrayList]::new() + foreach ($analysis in $DriveAnalyses) { + if (-not $analysis.Metrics) { continue } + foreach ($m in $analysis.Metrics) { + $d = ConvertTo-SafeDateTime $m.timestamp + if ($d) { [void]$allDates.Add($d) } + } + } + + if ($allDates.Count -eq 0) { + [void]$sb.Append("Collecting data...") + [void]$sb.Append("") + return $sb.ToString() + } + + $sortedDates = $allDates | Sort-Object + $minDate = $sortedDates[0] + $maxDate = $sortedDates[-1] + $dateRange = [math]::Max(1, ($maxDate - $minDate).TotalDays) + + # Horizontal gridlines at 0%, 25%, 50%, 75%, 100% + foreach ($pct in @(0, 25, 50, 75, 100)) { + $y = [math]::Round($mTop + $plotH - ($pct / 100 * $plotH), 1) + $gridColor = if ($pct -eq 0 -or $pct -eq 100) { '#cbd5e1' } else { '#f1f5f9' } + [void]$sb.Append("") + [void]$sb.Append("${pct}%") + } + + # X-axis date labels (up to 7 labels) + $labelCount = [math]::Min(7, [math]::Max(2, [math]::Floor($dateRange))) + $labelInterval = $dateRange / ($labelCount - 1) + for ($i = 0; $i -lt $labelCount; $i++) { + $dayOffset = $i * $labelInterval + $x = [math]::Round($mLeft + ($dayOffset / $dateRange * $plotW), 1) + $dateLabel = $minDate.AddDays($dayOffset).ToString("MM/dd") + [void]$sb.Append("$dateLabel") + } + + # Plot lines for each drive + $colorIdx = 0 + $legendItems = [System.Collections.ArrayList]::new() + + foreach ($analysis in $DriveAnalyses) { + $color = $Script:CHART_COLORS[$colorIdx % $Script:CHART_COLORS.Count] + $colorIdx++ + + if (-not $analysis.Metrics -or $analysis.Metrics.Count -eq 0) { + [void]$legendItems.Add(@{ Label = $analysis.Letter; Color = 
$color; Type = $analysis.DriveType }) + continue + } + + $points = [System.Collections.ArrayList]::new() + foreach ($m in ($analysis.Metrics | Sort-Object timestamp)) { + $d = ConvertTo-SafeDateTime $m.timestamp + if (-not $d) { continue } + + $x = [math]::Round($mLeft + (($d - $minDate).TotalDays / $dateRange * $plotW), 1) + $y = [math]::Round($mTop + $plotH - ([double]$m.usagePercent / 100 * $plotH), 1) + [void]$points.Add("$x,$y") + } + + if ($points.Count -gt 1) { + [void]$sb.Append("") + } + elseif ($points.Count -eq 1) { + $coords = $points[0] -split ',' + [void]$sb.Append("") + } + + [void]$legendItems.Add(@{ Label = $analysis.Letter; Color = $color; Type = $analysis.DriveType }) + } + + # Legend row at bottom + $legendY = $mTop + $plotH + 30 + $legendX = $mLeft + foreach ($item in $legendItems) { + $label = $item.Label -replace ':$', '' + if ($item.Type -eq 'OS') { $label += " (OS)" } + + [void]$sb.Append("") + [void]$sb.Append("$label") + + $legendX += 65 + ($label.Length * 2) + if ($legendX -gt ($chartWidth - 80)) { + $legendX = $mLeft + $legendY += 16 + } + } + + [void]$sb.Append("") + return $sb.ToString() +} + +# ============================================================================ +# HTML STORAGE REPORT +# ============================================================================ +function Build-StorageReport { + <# + .SYNOPSIS + Generates a complete HTML storage report with SVG chart and summary table. + Designed for Ninja RMM WYSIWYG custom field. 
+ #> + param( + [string]$Hostname, + [string]$ServerStatus, + [array]$AllAnalyses + ) + + # Status color mapping + $statusColors = @{ + "Critical" = @{ bg = '#dc2626'; grad = 'linear-gradient(135deg,#dc2626,#991b1b)' } + "Attention" = @{ bg = '#ea580c'; grad = 'linear-gradient(135deg,#ea580c,#c2410c)' } + "Growing" = @{ bg = '#d97706'; grad = 'linear-gradient(135deg,#d97706,#b45309)' } + "Stable" = @{ bg = '#16a34a'; grad = 'linear-gradient(135deg,#16a34a,#15803d)' } + "Declining" = @{ bg = '#2563eb'; grad = 'linear-gradient(135deg,#2563eb,#1d4ed8)' } + "Insufficient Data" = @{ bg = '#64748b'; grad = 'linear-gradient(135deg,#64748b,#475569)' } + } + $statusBadgeColors = @{ + "Critical" = @{ bg = '#fef2f2'; fg = '#dc2626'; border = '#fecaca' } + "Attention" = @{ bg = '#fff7ed'; fg = '#ea580c'; border = '#fed7aa' } + "Growing" = @{ bg = '#fffbeb'; fg = '#d97706'; border = '#fde68a' } + "Stable" = @{ bg = '#f0fdf4'; fg = '#16a34a'; border = '#bbf7d0' } + "Declining" = @{ bg = '#eff6ff'; fg = '#2563eb'; border = '#bfdbfe' } + "Insufficient Data" = @{ bg = '#f8fafc'; fg = '#64748b'; border = '#e2e8f0' } + "Offline" = @{ bg = '#f8fafc'; fg = '#94a3b8'; border = '#e2e8f0' } + } + + $headerColor = $statusColors[$ServerStatus] + if (-not $headerColor) { $headerColor = $statusColors["Insufficient Data"] } + + $now = (Get-Date).ToString("yyyy-MM-dd HH:mm") + $driveCount = $AllAnalyses.Count + $totalPoints = ($AllAnalyses | ForEach-Object { $_.DataPoints } | Measure-Object -Sum).Sum + + # Build SVG chart + $svgChart = Build-SvgChart -DriveAnalyses $AllAnalyses + + # Build summary table rows + $tableRows = [System.Text.StringBuilder]::new() + $rowIndex = 0 + foreach ($a in $AllAnalyses) { + $color = $Script:CHART_COLORS[$rowIndex % $Script:CHART_COLORS.Count] + + $letterDisplay = $a.Letter -replace ':$', '' + $typeTag = if ($a.DriveType -eq 'OS') { ' (OS)' } else { '' } + $labelDisplay = if ($a.VolumeLabel) { " $($a.VolumeLabel)" } else { '' } + + $baseStatus = $a.Status 
-replace '\s*\(Limited\)', '' + $badge = $statusBadgeColors[$baseStatus] + if (-not $badge) { $badge = $statusBadgeColors["Offline"] } + $statusDisplay = $a.Status + $statusHtml = "$statusDisplay" + + $sizeStr = if ($a.TotalSizeGB -gt 0) { "$([math]::Round($a.TotalSizeGB, 0)) GB" } else { '-' } + $usedStr = if ($a.CurrentUsedGB -gt 0) { "$([math]::Round($a.CurrentUsedGB, 1)) GB" } else { '-' } + $freeStr = if ($a.CurrentFreeGB -gt 0) { "$([math]::Round($a.CurrentFreeGB, 1)) GB" } else { '-' } + + $growthStr = if ($a.GBPerMonth -match '^\-?\d') { "$($a.GBPerMonth)" } else { $a.GBPerMonth } + + $daysStr = $a.DaysUntilFull + if ($daysStr -eq "1825.00") { $daysStr = "5yr+" } + elseif ($daysStr -match '^\d+\.\d+$') { + $daysVal = [double]$daysStr + if ($daysVal -ge 365) { $daysStr = "$([math]::Round($daysVal / 365, 1))yr" } + elseif ($daysVal -ge 30) { $daysStr = "$([math]::Round($daysVal / 30, 1))mo" } + else { $daysStr = "$([math]::Round($daysVal, 0))d" } + } + + $rowBg = if ($rowIndex % 2 -eq 0) { '#ffffff' } else { '#f8fafc' } + + [void]$tableRows.Append(@" + +$letterDisplay$typeTag$labelDisplay +$sizeStr +$usedStr +$freeStr +$growthStr +$daysStr +$statusHtml + +"@) + $rowIndex++ + } + + # Assemble full HTML report + $html = @" +
+
+
$Hostname
+
Storage: $ServerStatus | $now
+
+
+$svgChart +
+
+ + + + + + + + + + + +$($tableRows.ToString()) + +
DriveSizeUsedFreeGB/MoFullStatus
+
+
+v$($Script:VERSION) | $driveCount drives | $totalPoints pts | OLS regression | SQLite +
+
+"@ + + return $html +} + +# ============================================================================ +# ALERTING (Event Log) # ============================================================================ function Initialize-EventSource { try { @@ -876,7 +1195,6 @@ function Initialize-EventSource { } catch { Write-Log "WARNING: Could not register event log source: $_" - Write-Log "WARNING: Event log writing will be skipped" return $false } return $true @@ -890,7 +1208,6 @@ function Write-CriticalAlert { if ($CriticalDrives.Count -eq 0) { return } - # Section 11.4 message formats if ($CriticalDrives.Count -eq 1) { $d = $CriticalDrives[0] $daysText = if ($d.DaysUntilFull -match '^\d') { "$($d.DaysUntilFull) days until full" } else { $d.DaysUntilFull } @@ -914,226 +1231,139 @@ function Write-CriticalAlert { } } -function Write-ExcessDriveAlert { - param( - [int]$DriveCount, - [array]$ExcludedDrives, - [string]$Hostname - ) - - $excludedList = ($ExcludedDrives | ForEach-Object { $_.Letter }) -join ", " - $message = "STORAGE MONITORING: Server $Hostname has $DriveCount data drives but only 3 can be reported. Excluded drives: $excludedList. Script update may be required." - - try { - Write-EventLog -LogName Application -Source $Script:EVENT_SOURCE -EventId 5002 -EntryType Warning -Message $message -ErrorAction Stop - Write-Log "! Excess drive alert written to Event Log (ID 5002)" - } - catch { - Write-Log "ERROR: Failed to write Event 5002: $_" - } -} - # ============================================================================ -# NINJA RMM INTEGRATION (Section 10) +# NINJA RMM INTEGRATION (single WYSIWYG field) # ============================================================================ -function Update-NinjaFields { - param( - [string]$ServerStatus, - [hashtable]$OSAnalysis, - [array]$DataDriveSlots - ) +function Update-NinjaField { + <# + .SYNOPSIS + Writes the HTML storage report to a single Ninja RMM WYSIWYG custom field. 
+ Requires a WYSIWYG field named 'storageReport' in Ninja RMM. + #> + param([string]$HtmlReport) $runningInNinja = $null -ne (Get-Command "Ninja-Property-Set" -ErrorAction SilentlyContinue) if (-not $runningInNinja) { return $false } try { - # Overall Status (Section 10.1) - Ninja-Property-Set $Script:FIELD_SERVER_STATUS $ServerStatus - - # OS Drive fields - if ($OSAnalysis) { - Ninja-Property-Set $Script:FIELD_OS_STATUS $OSAnalysis.Status - Ninja-Property-Set $Script:FIELD_OS_GROWTH $OSAnalysis.GBPerMonth - Ninja-Property-Set $Script:FIELD_OS_DAYS $OSAnalysis.DaysUntilFull - } - else { - Ninja-Property-Set $Script:FIELD_OS_STATUS "NO DRIVE" - Ninja-Property-Set $Script:FIELD_OS_GROWTH "NO DRIVE" - Ninja-Property-Set $Script:FIELD_OS_DAYS "NO DRIVE" - } - - # Data Drive 1-3 fields (Section 10.4) - for ($i = 0; $i -lt $Script:MAX_DATA_DRIVES; $i++) { - $slotNum = $i + 1 - $slot = if ($i -lt $DataDriveSlots.Count) { $DataDriveSlots[$i] } else { $null } - - $letterField = $Script:FIELD_DATA_LETTER -f $slotNum - $statusField = $Script:FIELD_DATA_STATUS -f $slotNum - $growthField = $Script:FIELD_DATA_GROWTH -f $slotNum - $daysField = $Script:FIELD_DATA_DAYS -f $slotNum - - if ($null -ne $slot -and $slot.Status -ne "NO DRIVE") { - # Letter display: strip colon for Ninja, add (OFFLINE) if offline - $letterDisplay = if ($slot.DriveStatus -eq "Offline") { - "$($slot.Letter -replace ':$', '') (OFFLINE)" - } else { - $slot.Letter -replace ':$', '' - } - - Ninja-Property-Set $letterField $letterDisplay - Ninja-Property-Set $statusField $slot.Status - Ninja-Property-Set $growthField $slot.GBPerMonth - Ninja-Property-Set $daysField $slot.DaysUntilFull - } - else { - # Empty slot (Section 10.2) - Ninja-Property-Set $letterField "NO DRIVE" - Ninja-Property-Set $statusField "NO DRIVE" - Ninja-Property-Set $growthField "NO DRIVE" - Ninja-Property-Set $daysField "NO DRIVE" - } - } - + Ninja-Property-Set $Script:FIELD_STORAGE_REPORT $HtmlReport return $true } catch { - Write-Log 
"ERROR: Failed to update Ninja fields: $_" + Write-Log "ERROR: Failed to update Ninja field: $_" return $false } } # ============================================================================ -# CONSOLE OUTPUT (Section 19) +# CONSOLE OUTPUT # ============================================================================ function Write-Summary { param( [string]$Hostname, - [hashtable]$OSAnalysis, - [array]$DataDriveSlots, [string]$ServerStatus, - [bool]$IsNinja, - [array]$NewCriticalDrives + [array]$AllAnalyses, + [array]$NewCriticalDrives, + [bool]$IsNinja ) Write-Log "Storage Growth Analysis - $Hostname" Write-Log ([char]0x2550 * 63) Write-Log "" - # OS Drive section - if ($OSAnalysis) { - Write-Log "OS DRIVE (Auto-detected: $($OSAnalysis.Letter))" - $labelDisplay = if ($OSAnalysis.VolumeLabel) { $OSAnalysis.VolumeLabel } else { $OSAnalysis.Letter } - Write-Log " Drive $($OSAnalysis.Letter) ($labelDisplay)" - - # Show current usage if we have data points - if ($OSAnalysis.CurrentUsedGB -gt 0 -or $OSAnalysis.CurrentFreeGB -gt 0) { - $usedStr = $OSAnalysis.CurrentUsedGB.ToString("F3") - $totalStr = $OSAnalysis.TotalSizeGB.ToString("F3") - $pctStr = $OSAnalysis.CurrentPercent.ToString("F2") - Write-Log " Current: $usedStr GB / $totalStr GB ($pctStr%)" + # OS Drive first + $osDrive = $AllAnalyses | Where-Object { $_.DriveType -eq 'OS' } | Select-Object -First 1 + if ($osDrive) { + Write-Log "OS DRIVE ($($osDrive.Letter))" + $labelDisplay = if ($osDrive.VolumeLabel) { $osDrive.VolumeLabel } else { $osDrive.Letter } + Write-Log " Drive $($osDrive.Letter) ($labelDisplay)" + + if ($osDrive.CurrentUsedGB -gt 0 -or $osDrive.CurrentFreeGB -gt 0) { + Write-Log " Current: $($osDrive.CurrentUsedGB.ToString('F1')) / $($osDrive.TotalSizeGB.ToString('F1')) GB ($($osDrive.CurrentPercent.ToString('F1'))%)" } - $baseStatus = $OSAnalysis.Status -replace '\s*\(Limited\)', '' + $baseStatus = $osDrive.Status -replace '\s*\(Limited\)', '' if ($baseStatus -ne "Insufficient Data" -and 
$baseStatus -ne "Offline") { - if ($OSAnalysis.GBPerMonth -ne "Insufficient Data" -and $OSAnalysis.GBPerMonth -ne "OFFLINE") { - Write-Log " Growth: $($OSAnalysis.GBPerMonth) GB/month" - - $daysDisplay = $OSAnalysis.DaysUntilFull - if ($daysDisplay -eq "1825.00") { - $daysDisplay = "1825.00 days until full - capped" - } elseif ($daysDisplay -match '^\d') { - $daysDisplay = "$daysDisplay days until full" - } - Write-Log " Status: $($OSAnalysis.Status) ($daysDisplay)" - } + Write-Log " Growth: $($osDrive.GBPerMonth) GB/month" + $daysDisplay = $osDrive.DaysUntilFull + if ($daysDisplay -match '^\d') { $daysDisplay = "$daysDisplay days" } + Write-Log " Status: $($osDrive.Status) ($daysDisplay)" } else { - Write-Log " Status: $($OSAnalysis.Status)" + Write-Log " Status: $($osDrive.Status)" } } Write-Log "" - Write-Log "DATA DRIVES (Ranked by Criticality)" + Write-Log "DATA DRIVES" Write-Log ([char]0x2500 * 63) - for ($i = 0; $i -lt $Script:MAX_DATA_DRIVES; $i++) { - $slotNum = $i + 1 - $slot = if ($i -lt $DataDriveSlots.Count) { $DataDriveSlots[$i] } else { $null } - - if ($null -ne $slot -and $slot.Status -ne "NO DRIVE") { - $labelDisplay = if ($slot.VolumeLabel) { $slot.VolumeLabel } else { $slot.Letter } - Write-Log " [$slotNum] Drive $($slot.Letter) ($labelDisplay)" + $dataDrives = @($AllAnalyses | Where-Object { $_.DriveType -ne 'OS' }) + if ($dataDrives.Count -eq 0) { + Write-Log " No data drives detected" + } + else { + $idx = 1 + foreach ($d in $dataDrives) { + $labelDisplay = if ($d.VolumeLabel) { $d.VolumeLabel } else { $d.Letter } + Write-Log " [$idx] Drive $($d.Letter) ($labelDisplay)" - if ($slot.DriveStatus -eq "Offline") { + if ($d.DriveStatus -eq "Offline") { Write-Log " Status: OFFLINE" } - elseif (($slot.Status -replace '\s*\(Limited\)', '') -eq "Insufficient Data") { - Write-Log " Status: $($slot.Status)" + elseif (($d.Status -replace '\s*\(Limited\)', '') -eq "Insufficient Data") { + Write-Log " Status: $($d.Status)" } else { - # Show current usage - 
if ($slot.CurrentUsedGB -gt 0 -or $slot.CurrentFreeGB -gt 0) { - $usedStr = ([double]$slot.CurrentUsedGB).ToString("F3") - $totalStr = ([double]$slot.TotalSizeGB).ToString("F3") - $pctStr = ([double]$slot.CurrentPercent).ToString("F2") - Write-Log " Current: $usedStr GB / $totalStr GB ($pctStr%)" + if ($d.CurrentUsedGB -gt 0 -or $d.CurrentFreeGB -gt 0) { + Write-Log " Current: $($d.CurrentUsedGB.ToString('F1')) / $($d.TotalSizeGB.ToString('F1')) GB ($($d.CurrentPercent.ToString('F1'))%)" } + Write-Log " Growth: $($d.GBPerMonth) GB/month" + $daysDisplay = $d.DaysUntilFull + if ($daysDisplay -match '^\d') { $daysDisplay = "$daysDisplay days" } + Write-Log " Status: $($d.Status) ($daysDisplay)" - Write-Log " Growth: $($slot.GBPerMonth) GB/month" - - $statusUpper = ($slot.Status -replace '\s*\(Limited\)', '').ToUpper() - $limitedTag = if ($slot.IsLimited) { " (Limited)" } else { "" } - $daysDisplay = $slot.DaysUntilFull - if ($daysDisplay -eq "1825.00") { - $daysDisplay = "1825.00 days until full - capped" - } elseif ($daysDisplay -match '^\d') { - $daysDisplay = "$daysDisplay days until full" - } - Write-Log " Status: $statusUpper$limitedTag ($daysDisplay)" - - # Alert indicator for newly critical drives - $baseSlotStatus = $slot.Status -replace '\s*\(Limited\)', '' + $baseSlotStatus = $d.Status -replace '\s*\(Limited\)', '' if ($baseSlotStatus -eq "Critical" -and $NewCriticalDrives) { - $isNewCritical = $slot.Letter -in @($NewCriticalDrives | ForEach-Object { $_.Letter }) + $isNewCritical = $d.Letter -in @($NewCriticalDrives | ForEach-Object { $_.Letter }) if ($isNewCritical) { Write-Log " Alert: NEW - Event 5001 written" } } } + + Write-Log "" + $idx++ } - else { - Write-Log " [$slotNum] NO DRIVE" - } - Write-Log "" } Write-Log ([char]0x2550 * 63) Write-Log "SERVER STATUS: $($ServerStatus.ToUpper())" + Write-Log "Database: $($Script:DB_PATH)" Write-Log "" } # ============================================================================ -# MAIN EXECUTION (Section 12) 
+# MAIN EXECUTION # ============================================================================ function Main { $hostname = $env:COMPUTERNAME + $deviceId = $env:COMPUTERNAME $runningInNinja = $null -ne (Get-Command "Ninja-Property-Set" -ErrorAction SilentlyContinue) # ── Step 1: Initialize ─────────────────────────────────────────────────── - # Test mode banner (Section 13) if (-not $runningInNinja) { Write-Log "*** TEST MODE - Not running in Ninja context ***" Write-Log "Ninja custom field updates will be skipped." Write-Log "" } - # Verbose initialization info (Section 14.4) Write-VerboseLog "PowerShell Version: $($PSVersionTable.PSVersion.ToString())" Write-VerboseLog "Script Version: $($Script:VERSION)" - Write-VerboseLog "Storage Path: $($Script:STORAGE_PATH)" + Write-VerboseLog "Database Path: $($Script:DB_PATH)" - # Create storage folder (exit 1 on failure - Section 15.2) + # Create storage folder if (-not (Test-Path $Script:STORAGE_PATH)) { try { New-Item -Path $Script:STORAGE_PATH -ItemType Directory -Force -ErrorAction Stop | Out-Null @@ -1141,22 +1371,40 @@ function Main { } catch { Write-Log "CRITICAL: Cannot create storage folder: $_" - Write-Log "Script cannot proceed. Exiting." Save-LogFile exit 1 } } - # Register event log source (Section 11.3) + # Initialize PSSQLite module + if (-not (Initialize-SQLiteModule)) { + Write-Log "CRITICAL: PSSQLite module is required. Cannot proceed." + Save-LogFile + exit 1 + } + + # Initialize database schema + if (-not (Initialize-Database)) { + Write-Log "CRITICAL: Database initialization failed. Cannot proceed." 
+ Save-LogFile + exit 1 + } + + # Register event log source $eventLogAvailable = Initialize-EventSource - # Load existing history - $history = Load-History + # Register device + Get-DeviceRecord -DeviceId $deviceId -Hostname $hostname - # Capture previous excess-drive alert state for fire-once logic (Section 11.2) - $previousExcessAlertSent = [bool]$history.excessDriveAlertSent + # ── Step 2: Migrate Legacy Data ────────────────────────────────────────── - # ── Step 2: Discover & Collect ─────────────────────────────────────────── + # Check for existing JSON history and migrate (one-time) + $existingDrives = @(Get-AllDeviceDrives -DeviceId $deviceId) + if ($existingDrives.Count -eq 0) { + Import-LegacyJsonHistory -DeviceId $deviceId + } + + # ── Step 3: Discover & Collect ─────────────────────────────────────────── $currentDrives = $null try { @@ -1164,8 +1412,6 @@ function Main { } catch { Write-Log "ERROR: Drive enumeration failed: $_" - Write-Log "Preserving existing data, skipping collection." - Save-History -History $history Save-LogFile exit 2 } @@ -1174,90 +1420,65 @@ function Main { Write-Log "WARNING: No qualifying drives found." 
} - # ── Step 3: Update Drive Status ────────────────────────────────────────── + # ── Step 4: Update Database ────────────────────────────────────────────── - $history = Update-History -History $history -CurrentDrives $currentDrives + $now = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") + $visibleLetters = @($currentDrives | ForEach-Object { $_.Letter }) - # ── Step 4: Calculate Trends ───────────────────────────────────────────── + # Upsert drives and insert metrics + foreach ($drive in $currentDrives) { + $driveId = Save-DriveRecord -DeviceId $deviceId -DriveLetter $drive.Letter ` + -VolumeLabel $drive.VolumeLabel -TotalSizeGB $drive.TotalSizeGB ` + -DriveType $drive.DriveType -Status "Online" - $osAnalysis = $null - $dataAnalyses = [System.Collections.ArrayList]::new() + Add-MetricRecord -DriveId $driveId -Timestamp $now ` + -UsedGB $drive.UsedGB -FreeGB $drive.FreeGB -UsagePercent $drive.UsagePercent - foreach ($letter in @($history.drives.Keys)) { - $driveData = $history.drives[$letter] - $analysis = Get-DriveAnalysis -DriveData $driveData -DriveLetter $letter + Write-VerboseLog "Drive $($drive.Letter): Metric recorded (Used: $($drive.UsedGB) GB, Free: $($drive.FreeGB) GB)" + } - if ($driveData.driveType -eq "OS") { - $osAnalysis = $analysis - } - else { - [void]$dataAnalyses.Add($analysis) + # Mark drives not currently visible as Offline + $allDrives = @(Get-AllDeviceDrives -DeviceId $deviceId) + foreach ($dbDrive in $allDrives) { + if ($dbDrive.drive_letter -notin $visibleLetters -and $dbDrive.status -ne "Offline") { + Write-VerboseLog "Drive $($dbDrive.drive_letter): No longer visible - marking Offline" + Set-DriveOffline -DriveId $dbDrive.id } } - # ── Step 5: Rank & Organize ────────────────────────────────────────────── + # Remove drives offline > 30 days + $removedCount = Remove-StaleDrives -DeviceId $deviceId - $sortedDataDrives = @(Sort-DataDrives -Analyses $dataAnalyses) + # Prune old metrics + Remove-OldMetrics - # ── Step 6: Check Drive Count 
(Section 10.3) ──────────────────────────── + # ── Step 5: Analyze All Drives ─────────────────────────────────────────── - $onlineDataCount = @($dataAnalyses | Where-Object { $_.DriveStatus -ne "Offline" }).Count - $excludedDrives = @() - $reportedDataDrives = $sortedDataDrives + $allDrives = @(Get-AllDeviceDrives -DeviceId $deviceId) + $allAnalyses = [System.Collections.ArrayList]::new() - if ($sortedDataDrives.Count -gt $Script:MAX_DATA_DRIVES) { - $reportedDataDrives = @($sortedDataDrives[0..($Script:MAX_DATA_DRIVES - 1)]) - $excludedDrives = @($sortedDataDrives[$Script:MAX_DATA_DRIVES..($sortedDataDrives.Count - 1)]) - - Write-Log "NOTE: $($sortedDataDrives.Count) data drives detected, reporting top $($Script:MAX_DATA_DRIVES)" - foreach ($excl in $excludedDrives) { - Write-Log " Excluded: $($excl.Letter)" - } + foreach ($dbDrive in $allDrives) { + $metrics = @(Get-DriveMetrics -DriveId $dbDrive.id) + $analysis = Get-DriveAnalysis -DriveRecord $dbDrive -Metrics $metrics + [void]$allAnalyses.Add($analysis) } - # Update excess-drive alert flag (Section 11.2 - Event 5002 fire-once) - if ($onlineDataCount -gt $Script:MAX_DATA_DRIVES) { - $history.excessDriveAlertSent = $true - } - else { - $history.excessDriveAlertSent = $false - } - - # Fire Event 5002 only on NEW transition (was false, now true) - if ($onlineDataCount -gt $Script:MAX_DATA_DRIVES -and -not $previousExcessAlertSent -and $eventLogAvailable) { - Write-ExcessDriveAlert -DriveCount $onlineDataCount -ExcludedDrives $excludedDrives -Hostname $hostname - } - - # Pad data drive slots to 3 (Section 10.2) - $dataSlots = [System.Collections.ArrayList]::new() - foreach ($d in $reportedDataDrives) { - [void]$dataSlots.Add($d) - } - while ($dataSlots.Count -lt $Script:MAX_DATA_DRIVES) { - [void]$dataSlots.Add(@{ - Letter = "NO DRIVE" - VolumeLabel = "" - Status = "NO DRIVE" - GBPerMonth = "NO DRIVE" - DaysUntilFull = "NO DRIVE" - DriveStatus = "NO DRIVE" - IsLimited = $false - CurrentUsedGB = 0 - CurrentFreeGB = 
0 - CurrentPercent = 0 - TotalSizeGB = 0 - }) - } + # Sort: OS first, then data drives by criticality + $osAnalyses = @($allAnalyses | Where-Object { $_.DriveType -eq 'OS' }) + $dataAnalyses = @($allAnalyses | Where-Object { $_.DriveType -ne 'OS' }) + $sortedData = @(Sort-DriveAnalyses -Analyses $dataAnalyses) + + $sortedAll = [System.Collections.ArrayList]::new() + foreach ($a in $osAnalyses) { [void]$sortedAll.Add($a) } + foreach ($a in $sortedData) { [void]$sortedAll.Add($a) } + + # ── Step 6: Server Status ──────────────────────────────────────────────── - # Determine overall server status - worst case among online drives (Section 9.7) $worstSeverity = 0 $serverStatus = "Insufficient Data" - $allOnlineAnalyses = @() - if ($osAnalysis -and $osAnalysis.DriveStatus -ne "Offline") { $allOnlineAnalyses += $osAnalysis } - $allOnlineAnalyses += @($dataAnalyses | Where-Object { $_.DriveStatus -ne "Offline" }) - - foreach ($analysis in $allOnlineAnalyses) { + $onlineAnalyses = @($allAnalyses | Where-Object { $_.DriveStatus -ne "Offline" }) + foreach ($analysis in $onlineAnalyses) { $severity = Get-ServerStatusSeverity -Status $analysis.Status if ($severity -gt $worstSeverity) { $worstSeverity = $severity @@ -1265,79 +1486,65 @@ function Main { } } - # ── Step 7: Check Critical - Fire-Once Logic (Section 11.2) ───────────── + # ── Step 7: Critical Drive Alerts (fire-once) ──────────────────────────── $newCriticalDrives = [System.Collections.ArrayList]::new() - foreach ($letter in @($history.drives.Keys)) { - $driveData = $history.drives[$letter] - - # Find matching analysis result - $analysis = $null - if ($osAnalysis -and $osAnalysis.Letter -eq $letter) { - $analysis = $osAnalysis - } - else { - $analysis = $dataAnalyses | Where-Object { $_.Letter -eq $letter } | Select-Object -First 1 - } - - if (-not $analysis) { continue } - + foreach ($analysis in $allAnalyses) { $baseStatus = $analysis.Status -replace '\s*\(Limited\)', '' if ($baseStatus -eq "Critical") { - if 
(-not $driveData.alertSent) { - # Transition TO Critical - fire alert + if (-not $analysis.AlertSent) { [void]$newCriticalDrives.Add($analysis) - $driveData.alertSent = $true - Write-VerboseLog "Drive ${letter}: alertSent was FALSE, setting to TRUE, writing Event 5001" - } - else { - # Already alerted - skip - Write-VerboseLog "Drive ${letter}: alertSent was TRUE, skipping Event 5001" + Set-DriveAlertSent -DriveId $analysis.DriveId -AlertSent $true + Write-VerboseLog "Drive $($analysis.Letter): NEW Critical - writing Event 5001" } } else { - # No longer Critical - reset flag for next incident - if ($driveData.alertSent) { - $driveData.alertSent = $false - Write-VerboseLog "Drive ${letter}: No longer Critical, resetting alertSent to FALSE" + if ($analysis.AlertSent) { + Set-DriveAlertSent -DriveId $analysis.DriveId -AlertSent $false + Write-VerboseLog "Drive $($analysis.Letter): No longer Critical, resetting alert" } } } - # Write combined Event 5001 if any drives transitioned to Critical (Section 11.4) if ($newCriticalDrives.Count -gt 0 -and $eventLogAvailable) { Write-CriticalAlert -CriticalDrives $newCriticalDrives -Hostname $hostname } - # ── Step 8: Persist & Report ───────────────────────────────────────────── + # ── Step 8: Generate Report & Output ───────────────────────────────────── - # Console output summary - Write-Summary -Hostname $hostname -OSAnalysis $osAnalysis -DataDriveSlots $dataSlots ` - -ServerStatus $serverStatus -IsNinja $runningInNinja -NewCriticalDrives $newCriticalDrives + # Console summary + Write-Summary -Hostname $hostname -ServerStatus $serverStatus -AllAnalyses @($sortedAll) ` + -NewCriticalDrives $newCriticalDrives -IsNinja $runningInNinja - # Update Ninja custom fields or show test mode message + # Build HTML report with SVG chart + $htmlReport = Build-StorageReport -Hostname $hostname -ServerStatus $serverStatus -AllAnalyses @($sortedAll) + + # Update Ninja WYSIWYG field if ($runningInNinja) { - $ninjaSuccess = Update-NinjaFields 
-ServerStatus $serverStatus -OSAnalysis $osAnalysis -DataDriveSlots $dataSlots + $ninjaSuccess = Update-NinjaField -HtmlReport $htmlReport if ($ninjaSuccess) { - Write-Log ([char]0x2713 + " CUSTOM FIELDS FILLED") - Write-Log " - OS Drive: $(if ($osAnalysis) { $osAnalysis.Letter } else { 'NO DRIVE' })" - for ($i = 0; $i -lt $Script:MAX_DATA_DRIVES; $i++) { - $slot = $dataSlots[$i] - $display = if ($slot.Status -eq "NO DRIVE") { "NO DRIVE" } else { $slot.Letter } - Write-Log " - Data Drive $($i + 1): $display" - } + Write-Log ([char]0x2713 + " WYSIWYG field '$($Script:FIELD_STORAGE_REPORT)' updated") } } else { - Write-Log "*** TEST MODE - Ninja fields not updated ***" + Write-Log "*** TEST MODE - Ninja field not updated ***" + Write-VerboseLog "HTML report generated ($($htmlReport.Length) chars)" } - # Save history JSON - Save-History -History $history + # ── Step 9: Finalize ───────────────────────────────────────────────────── + + # Log database stats + $totalDrives = $allDrives.Count + try { + $totalMetrics = Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "SELECT COUNT(*) AS cnt FROM metric" -ErrorAction Stop + $metricCount = if ($totalMetrics) { $totalMetrics.cnt } else { 0 } + } + catch { $metricCount = '?' } + Write-Log ([char]0x2713 + " Database updated ($totalDrives drives, $metricCount data points)") - # Write log file with rotation + # Write log file Save-LogFile exit 0 From 61f62f7526478dde838488c9c37495ef2be793ef Mon Sep 17 00:00:00 2001 From: Zach Boogher <129975920+AlrightLad@users.noreply.github.com> Date: Wed, 25 Feb 2026 03:36:47 -0500 Subject: [PATCH 7/9] Address CodeRabbit review round 5 and harden script for production MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Resolves all 9 unresolved findings from CodeRabbit review round 5 (review ID 3852355772) plus 7 additional issues caught during self-review. 
No behavioral changes to the monitoring logic — all fixes target correctness, safety, and robustness. Critical bug fix: [char]0x2550 * 63 performed numeric multiplication (produced 601776) instead of string repetition — fixed to "$([char]0x2550)" * 63 in all three Write-Summary separator lines Data integrity (3 fixes): Added Set-DailyMetric function that checks for an existing same-day metric before inserting, preventing duplicate rows when the script runs more than once per day (e.g. during testing or if the scheduled task fires twice) Guarded against [double]$null silently casting to 0.0 in legacy JSON migration — incomplete entries are now skipped with verbose logging instead of corrupting regression data Changed migration file rename from -ErrorAction SilentlyContinue to -ErrorAction Stop so a failed rename surfaces in the try/catch rather than silently allowing re-migration on next run Security (2 fixes): SecurityProtocol = Tls12 overwrote the entire protocol mask, disabling TLS 1.3 on .NET 5+ — now uses -bor to add TLS 1.2 without stripping existing protocols Volume labels, drive letters, and hostname are now HTML-encoded via [System.Net.WebUtility]::HtmlEncode() before interpolation into the WYSIWYG report to prevent XSS injection Pipeline safety: Added | Out-Null to all 19 DML Invoke-SqliteQuery calls (INSERT, UPDATE, DELETE, CREATE TABLE, CREATE INDEX) to prevent implicit output from leaking into function return values and corrupting callers Correctness (2 fixes): Reverted SELECT last_insert_rowid() back to re-query by UNIQUE constraint — last_insert_rowid() is connection-scoped and PSSQLite opens a new connection per -DataSource call, making the value unreliable Fixed Set-DailyMetric day boundary from < T23:59:59 to < next-day T00:00:00 to eliminate off-by-one at the day boundary Maintainability (3 fixes): Hardcoded "1825.00" sentinel in days-until-full display now derived from $Script:DAYS_CAP constant SVG chart viewBox height is now computed dynamically 
from legend row count, preventing legend clipping with 5+ drives $removedCount from Remove-StaleDrives is now logged instead of silently discarded Documentation/style: Added TODO comment to alert_state table explaining it's scaffolding for future centralized alerting Simplified redundant conditional in $growthStr (both branches returned identical values) Documented $env:COMPUTERNAME device_id hostname-change limitation in script header Legacy JSON path now uses $env:ProgramData instead of hardcoded C:\ProgramData Moved $Script:LOG_PRUNE_THRESHOLD_KB from inside Save-LogFile function body to the constants section --- rmm-ninja/ServerGrowthTracking | 211 ++++++++++++++++++++++++--------- 1 file changed, 152 insertions(+), 59 deletions(-) diff --git a/rmm-ninja/ServerGrowthTracking b/rmm-ninja/ServerGrowthTracking index 8c69d80..e9907bd 100644 --- a/rmm-ninja/ServerGrowthTracking +++ b/rmm-ninja/ServerGrowthTracking @@ -20,6 +20,11 @@ - Create ONE WYSIWYG custom field named "storageReport" - The script populates it with an HTML dashboard including SVG line chart + Limitation: device_id is derived from $env:COMPUTERNAME. A hostname rename + will create a new device row and orphan historical data under the old name. + If stable identity across renames is required, consider switching to a + hardware identifier (e.g. Win32_ComputerSystemProduct.UUID). + .PARAMETER Verbose Enable detailed diagnostic output for troubleshooting. 
@@ -47,6 +52,7 @@ $Script:EVENT_SOURCE = "StorageGrowthMonitor" # Retention & thresholds $Script:RETENTION_DAYS = 65 $Script:LOG_RETENTION_DAYS = 90 +$Script:LOG_PRUNE_THRESHOLD_KB = 256 $Script:OFFLINE_REMOVAL_DAYS = 30 $Script:MIN_DATA_POINTS = 7 $Script:FULL_CONFIDENCE_POINTS = 30 @@ -64,7 +70,7 @@ $Script:EXCLUDED_FILESYSTEMS = @("FAT", "FAT32", "RAW") $Script:FIELD_STORAGE_REPORT = "storageReport" # Legacy JSON paths (for migration) -$Script:LEGACY_JSON_PATH = "C:\ProgramData\NinjaRMM\StorageMetrics\storage_history.json" +$Script:LEGACY_JSON_PATH = Join-Path $env:ProgramData "NinjaRMM\StorageMetrics\storage_history.json" # Chart color palette (colorblind-friendly) $Script:CHART_COLORS = @('#2563eb', '#16a34a', '#ea580c', '#9333ea', '#0d9488', '#dc2626', '#ca8a04', '#be185d') @@ -118,8 +124,6 @@ function Write-VerboseLog { # LOG FILE MANAGEMENT # ============================================================================ function Save-LogFile { - $Script:LOG_PRUNE_THRESHOLD_KB = 256 - try { if ($Script:LogBuffer.Count -gt 0) { $Script:LogBuffer | Add-Content -Path $Script:LOG_FILE -Encoding UTF8 -ErrorAction Stop @@ -198,7 +202,8 @@ function Initialize-SQLiteModule { # Install from PSGallery Write-Log "PSSQLite module not found - installing from PSGallery..." 
try { - [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 + # Add TLS 1.2 without removing existing protocols (preserves TLS 1.3 on .NET 5+) + [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 # Ensure NuGet provider is available $nuget = Get-PackageProvider -Name NuGet -ErrorAction SilentlyContinue @@ -241,7 +246,7 @@ function Initialize-Database { created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%S', 'now', 'localtime')), updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%S', 'now', 'localtime')) ) -"@ -ErrorAction Stop +"@ -ErrorAction Stop | Out-Null # Drive table - one row per drive per device Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" @@ -258,7 +263,7 @@ function Initialize-Database { created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%S', 'now', 'localtime')), UNIQUE(device_id, drive_letter) ) -"@ -ErrorAction Stop +"@ -ErrorAction Stop | Out-Null # Metric table - time-series storage data points Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" @@ -270,9 +275,11 @@ function Initialize-Database { free_gb REAL NOT NULL, usage_percent REAL NOT NULL ) -"@ -ErrorAction Stop +"@ -ErrorAction Stop | Out-Null - # Alert state table - fire-once tracking per device + # Alert state table - scaffolding for future centralized alerting service. + # Not consumed by this script; fire-once logic uses drive.alert_sent instead. + # TODO: Wire up when centralized API is available (see Ticket 1123004 roadmap). 
Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" CREATE TABLE IF NOT EXISTS alert_state ( device_id TEXT NOT NULL REFERENCES device(device_id), @@ -281,11 +288,11 @@ function Initialize-Database { last_triggered TEXT, PRIMARY KEY (device_id, alert_type) ) -"@ -ErrorAction Stop +"@ -ErrorAction Stop | Out-Null # Indexes for efficient queries - Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "CREATE INDEX IF NOT EXISTS idx_metric_drive_ts ON metric(drive_id, timestamp)" -ErrorAction Stop - Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "CREATE INDEX IF NOT EXISTS idx_drive_device ON drive(device_id)" -ErrorAction Stop + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "CREATE INDEX IF NOT EXISTS idx_metric_drive_ts ON metric(drive_id, timestamp)" -ErrorAction Stop | Out-Null + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "CREATE INDEX IF NOT EXISTS idx_drive_device ON drive(device_id)" -ErrorAction Stop | Out-Null Write-VerboseLog "Database initialized: $($Script:DB_PATH)" return $true @@ -322,7 +329,7 @@ function Get-DeviceRecord { hostname = $Hostname os = $osVersion now = $now - } + } | Out-Null } else { Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "INSERT INTO device (device_id, hostname, os_version, created_at, updated_at) VALUES (@id, @hostname, @os, @now, @now)" -SqlParameters @{ @@ -330,7 +337,7 @@ function Get-DeviceRecord { hostname = $Hostname os = $osVersion now = $now - } + } | Out-Null } } @@ -382,7 +389,7 @@ function Save-DriveRecord { type = $DriveType status = $Status now = $now - } + } | Out-Null return $existing.id } else { @@ -397,7 +404,7 @@ function Save-DriveRecord { type = $DriveType status = $Status now = $now - } + } | Out-Null $newDrive = Get-DriveRecord -DeviceId $DeviceId -DriveLetter $DriveLetter return $newDrive.id @@ -426,6 +433,57 @@ function Add-MetricRecord { used = $UsedGB free = $FreeGB pct = $UsagePercent + } | Out-Null +} + +function Set-DailyMetric { + <# + .SYNOPSIS + Inserts or 
updates the metric for a drive for the current calendar day. + Prevents duplicate rows when the script runs more than once per day. + #> + param( + [int]$DriveId, + [string]$Timestamp, + [double]$UsedGB, + [double]$FreeGB, + [double]$UsagePercent + ) + + $parsed = ConvertTo-SafeDateTime $Timestamp + if ($null -eq $parsed) { + Add-MetricRecord -DriveId $DriveId -Timestamp $Timestamp -UsedGB $UsedGB -FreeGB $FreeGB -UsagePercent $UsagePercent + return + } + + $dayStart = $parsed.ToString("yyyy-MM-dd") + "T00:00:00" + $dayEnd = $parsed.AddDays(1).ToString("yyyy-MM-dd") + "T00:00:00" + + $existing = Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + SELECT id FROM metric + WHERE drive_id = @driveId AND timestamp >= @dayStart AND timestamp < @dayEnd + ORDER BY timestamp DESC LIMIT 1 +"@ -SqlParameters @{ + driveId = $DriveId + dayStart = $dayStart + dayEnd = $dayEnd + } + + if ($existing) { + $updateId = if ($existing -is [array]) { $existing[0].id } else { $existing.id } + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + UPDATE metric SET timestamp = @ts, used_gb = @used, free_gb = @free, usage_percent = @pct + WHERE id = @id +"@ -SqlParameters @{ + id = $updateId + ts = $Timestamp + used = $UsedGB + free = $FreeGB + pct = $UsagePercent + } | Out-Null + } + else { + Add-MetricRecord -DriveId $DriveId -Timestamp $Timestamp -UsedGB $UsedGB -FreeGB $FreeGB -UsagePercent $UsagePercent } } @@ -473,7 +531,7 @@ function Set-DriveAlertSent { Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "UPDATE drive SET alert_sent = @val WHERE id = @id" -SqlParameters @{ id = $DriveId val = $val - } + } | Out-Null } function Set-DriveOffline { @@ -481,7 +539,7 @@ function Set-DriveOffline { Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "UPDATE drive SET status = 'Offline' WHERE id = @id" -SqlParameters @{ id = $DriveId - } + } | Out-Null } function Remove-StaleDrives { @@ -508,8 +566,8 @@ function Remove-StaleDrives { $daysOffline = [math]::Round(((Get-Date) 
- $parsedLastSeen).TotalDays, 0) Write-Log "Drive $($drive.drive_letter): Offline for $daysOffline days - removing from database" - Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "DELETE FROM metric WHERE drive_id = @id" -SqlParameters @{ id = $drive.id } - Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "DELETE FROM drive WHERE id = @id" -SqlParameters @{ id = $drive.id } + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "DELETE FROM metric WHERE drive_id = @id" -SqlParameters @{ id = $drive.id } | Out-Null + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "DELETE FROM drive WHERE id = @id" -SqlParameters @{ id = $drive.id } | Out-Null } return $staleDrives.Count @@ -527,7 +585,7 @@ function Remove-OldMetrics { $count = if ($result) { $result.cnt } else { 0 } if ($count -gt 0) { - Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "DELETE FROM metric WHERE timestamp < @cutoff" -SqlParameters @{ cutoff = $cutoff } + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query "DELETE FROM metric WHERE timestamp < @cutoff" -SqlParameters @{ cutoff = $cutoff } | Out-Null Write-VerboseLog "Pruned $count metric records older than $($Script:RETENTION_DAYS) days" } } @@ -558,6 +616,7 @@ function Import-LegacyJsonHistory { $migratedDrives = 0 $migratedPoints = 0 + $skippedPoints = 0 foreach ($prop in $data.drives.PSObject.Properties) { $letter = $prop.Name @@ -582,6 +641,12 @@ function Import-LegacyJsonHistory { if ($driveData.history) { foreach ($entry in $driveData.history) { if (-not $entry.timestamp) { continue } + # Guard against null/missing metric fields that would cast to 0.0 and corrupt regression + if ($null -eq $entry.usedGB -or $null -eq $entry.freeGB -or $null -eq $entry.usagePercent) { + Write-VerboseLog "Migration: Skipping entry with missing fields (ts=$($entry.timestamp))" + $skippedPoints++ + continue + } $ts = $entry.timestamp Add-MetricRecord -DriveId $driveId -Timestamp $ts ` -UsedGB ([double]$entry.usedGB) -FreeGB 
([double]$entry.freeGB) ` @@ -595,7 +660,7 @@ function Import-LegacyJsonHistory { # Rename the old JSON file $migratedPath = $Script:LEGACY_JSON_PATH + ".migrated" - Move-Item -Path $Script:LEGACY_JSON_PATH -Destination $migratedPath -Force -ErrorAction SilentlyContinue + Move-Item -Path $Script:LEGACY_JSON_PATH -Destination $migratedPath -Force -ErrorAction Stop # Also rename backup if it exists $backupPath = $Script:LEGACY_JSON_PATH + ".bak" @@ -603,7 +668,8 @@ function Import-LegacyJsonHistory { Move-Item -Path $backupPath -Destination ($backupPath + ".migrated") -Force -ErrorAction SilentlyContinue } - Write-Log ([char]0x2713 + " Migration complete: $migratedDrives drives, $migratedPoints data points") + $skipMsg = if ($skippedPoints -gt 0) { " ($skippedPoints incomplete entries skipped)" } else { "" } + Write-Log ([char]0x2713 + " Migration complete: $migratedDrives drives, $migratedPoints data points$skipMsg") return $true } catch { @@ -945,17 +1011,43 @@ function Build-SvgChart { <# .SYNOPSIS Generates an inline SVG line chart of storage usage trends for all drives. + Dynamically sizes the viewBox to accommodate the legend without clipping. 
#> param([array]$DriveAnalyses) $chartWidth = 680 - $chartHeight = 240 $mLeft = 52 $mRight = 12 $mTop = 8 - $mBottom = 40 + $plotH = 192 $plotW = $chartWidth - $mLeft - $mRight - $plotH = $chartHeight - $mTop - $mBottom + + # Pre-build legend items to calculate required SVG height + $legendItems = [System.Collections.ArrayList]::new() + foreach ($analysis in $DriveAnalyses) { + $color = $Script:CHART_COLORS[$legendItems.Count % $Script:CHART_COLORS.Count] + $label = ($analysis.Letter -replace ':$', '') + if ($analysis.DriveType -eq 'OS') { $label += " (OS)" } + [void]$legendItems.Add(@{ Label = $label; Color = $color }) + } + + # Calculate legend row count to size the SVG dynamically + if ($legendItems.Count -gt 0) { + $legendRows = 1 + $testX = $mLeft + foreach ($item in $legendItems) { + $testX += 65 + ($item.Label.Length * 2) + if ($testX -gt ($chartWidth - 80)) { + $testX = $mLeft + $legendRows++ + } + } + $mBottom = 30 + ($legendRows * 16) + 6 + } + else { + $mBottom = 40 + } + $chartHeight = $mTop + $plotH + $mBottom $sb = [System.Text.StringBuilder]::new() [void]$sb.Append("") @@ -1002,18 +1094,13 @@ function Build-SvgChart { [void]$sb.Append("$dateLabel") } - # Plot lines for each drive + # Plot lines for each drive (using pre-built legend items for consistent colors) $colorIdx = 0 - $legendItems = [System.Collections.ArrayList]::new() - foreach ($analysis in $DriveAnalyses) { - $color = $Script:CHART_COLORS[$colorIdx % $Script:CHART_COLORS.Count] + $color = $legendItems[$colorIdx].Color $colorIdx++ - if (-not $analysis.Metrics -or $analysis.Metrics.Count -eq 0) { - [void]$legendItems.Add(@{ Label = $analysis.Letter; Color = $color; Type = $analysis.DriveType }) - continue - } + if (-not $analysis.Metrics -or $analysis.Metrics.Count -eq 0) { continue } $points = [System.Collections.ArrayList]::new() foreach ($m in ($analysis.Metrics | Sort-Object timestamp)) { @@ -1032,21 +1119,16 @@ function Build-SvgChart { $coords = $points[0] -split ',' 
[void]$sb.Append("") } - - [void]$legendItems.Add(@{ Label = $analysis.Letter; Color = $color; Type = $analysis.DriveType }) } # Legend row at bottom $legendY = $mTop + $plotH + 30 $legendX = $mLeft foreach ($item in $legendItems) { - $label = $item.Label -replace ':$', '' - if ($item.Type -eq 'OS') { $label += " (OS)" } - [void]$sb.Append("") - [void]$sb.Append("$label") + [void]$sb.Append("$($item.Label)") - $legendX += 65 + ($label.Length * 2) + $legendX += 65 + ($item.Label.Length * 2) if ($legendX -gt ($chartWidth - 80)) { $legendX = $mLeft $legendY += 16 @@ -1101,15 +1183,19 @@ function Build-StorageReport { # Build SVG chart $svgChart = Build-SvgChart -DriveAnalyses $AllAnalyses + # HTML-encode user-sourced values to prevent injection via volume labels or hostname + $safeHostname = [System.Net.WebUtility]::HtmlEncode($Hostname) + # Build summary table rows $tableRows = [System.Text.StringBuilder]::new() $rowIndex = 0 foreach ($a in $AllAnalyses) { $color = $Script:CHART_COLORS[$rowIndex % $Script:CHART_COLORS.Count] - $letterDisplay = $a.Letter -replace ':$', '' + $letterDisplay = [System.Net.WebUtility]::HtmlEncode(($a.Letter -replace ':$', '')) $typeTag = if ($a.DriveType -eq 'OS') { ' (OS)' } else { '' } - $labelDisplay = if ($a.VolumeLabel) { " $($a.VolumeLabel)" } else { '' } + $safeLabel = if ($a.VolumeLabel) { [System.Net.WebUtility]::HtmlEncode($a.VolumeLabel) } else { '' } + $labelDisplay = if ($safeLabel) { " $safeLabel" } else { '' } $baseStatus = $a.Status -replace '\s*\(Limited\)', '' $badge = $statusBadgeColors[$baseStatus] @@ -1121,10 +1207,14 @@ function Build-StorageReport { $usedStr = if ($a.CurrentUsedGB -gt 0) { "$([math]::Round($a.CurrentUsedGB, 1)) GB" } else { '-' } $freeStr = if ($a.CurrentFreeGB -gt 0) { "$([math]::Round($a.CurrentFreeGB, 1)) GB" } else { '-' } - $growthStr = if ($a.GBPerMonth -match '^\-?\d') { "$($a.GBPerMonth)" } else { $a.GBPerMonth } + $growthStr = $a.GBPerMonth $daysStr = $a.DaysUntilFull - if ($daysStr -eq 
"1825.00") { $daysStr = "5yr+" } + $capStr = ([double]$Script:DAYS_CAP).ToString("F2") + if ($daysStr -eq $capStr) { + $capYears = [math]::Round($Script:DAYS_CAP / 365, 0) + $daysStr = "${capYears}yr+" + } elseif ($daysStr -match '^\d+\.\d+$') { $daysVal = [double]$daysStr if ($daysVal -ge 365) { $daysStr = "$([math]::Round($daysVal / 365, 1))yr" } @@ -1152,7 +1242,7 @@ function Build-StorageReport { $html = @"
-
$Hostname
+
$safeHostname
Storage: $ServerStatus | $now
@@ -1268,7 +1358,7 @@ function Write-Summary { ) Write-Log "Storage Growth Analysis - $Hostname" - Write-Log ([char]0x2550 * 63) + Write-Log ("$([char]0x2550)" * 63) Write-Log "" # OS Drive first @@ -1296,7 +1386,7 @@ function Write-Summary { Write-Log "" Write-Log "DATA DRIVES" - Write-Log ([char]0x2500 * 63) + Write-Log ("$([char]0x2500)" * 63) $dataDrives = @($AllAnalyses | Where-Object { $_.DriveType -ne 'OS' }) if ($dataDrives.Count -eq 0) { @@ -1337,7 +1427,7 @@ function Write-Summary { } } - Write-Log ([char]0x2550 * 63) + Write-Log ("$([char]0x2550)" * 63) Write-Log "SERVER STATUS: $($ServerStatus.ToUpper())" Write-Log "Database: $($Script:DB_PATH)" Write-Log "" @@ -1351,7 +1441,7 @@ function Main { $deviceId = $env:COMPUTERNAME $runningInNinja = $null -ne (Get-Command "Ninja-Property-Set" -ErrorAction SilentlyContinue) - # ── Step 1: Initialize ─────────────────────────────────────────────────── + # -- Step 1: Initialize --------------------------------------------------- if (-not $runningInNinja) { Write-Log "*** TEST MODE - Not running in Ninja context ***" @@ -1396,7 +1486,7 @@ function Main { # Register device Get-DeviceRecord -DeviceId $deviceId -Hostname $hostname - # ── Step 2: Migrate Legacy Data ────────────────────────────────────────── + # -- Step 2: Migrate Legacy Data ------------------------------------------ # Check for existing JSON history and migrate (one-time) $existingDrives = @(Get-AllDeviceDrives -DeviceId $deviceId) @@ -1404,7 +1494,7 @@ function Main { Import-LegacyJsonHistory -DeviceId $deviceId } - # ── Step 3: Discover & Collect ─────────────────────────────────────────── + # -- Step 3: Discover & Collect ------------------------------------------- $currentDrives = $null try { @@ -1420,18 +1510,18 @@ function Main { Write-Log "WARNING: No qualifying drives found." 
} - # ── Step 4: Update Database ────────────────────────────────────────────── + # -- Step 4: Update Database ---------------------------------------------- $now = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss") $visibleLetters = @($currentDrives | ForEach-Object { $_.Letter }) - # Upsert drives and insert metrics + # Upsert drives and record daily metrics (idempotent per calendar day) foreach ($drive in $currentDrives) { $driveId = Save-DriveRecord -DeviceId $deviceId -DriveLetter $drive.Letter ` -VolumeLabel $drive.VolumeLabel -TotalSizeGB $drive.TotalSizeGB ` -DriveType $drive.DriveType -Status "Online" - Add-MetricRecord -DriveId $driveId -Timestamp $now ` + Set-DailyMetric -DriveId $driveId -Timestamp $now ` -UsedGB $drive.UsedGB -FreeGB $drive.FreeGB -UsagePercent $drive.UsagePercent Write-VerboseLog "Drive $($drive.Letter): Metric recorded (Used: $($drive.UsedGB) GB, Free: $($drive.FreeGB) GB)" @@ -1448,11 +1538,14 @@ function Main { # Remove drives offline > 30 days $removedCount = Remove-StaleDrives -DeviceId $deviceId + if ($removedCount -gt 0) { + Write-Log "Removed $removedCount stale drives (offline > $($Script:OFFLINE_REMOVAL_DAYS) days)" + } # Prune old metrics Remove-OldMetrics - # ── Step 5: Analyze All Drives ─────────────────────────────────────────── + # -- Step 5: Analyze All Drives ------------------------------------------- $allDrives = @(Get-AllDeviceDrives -DeviceId $deviceId) $allAnalyses = [System.Collections.ArrayList]::new() @@ -1472,7 +1565,7 @@ function Main { foreach ($a in $osAnalyses) { [void]$sortedAll.Add($a) } foreach ($a in $sortedData) { [void]$sortedAll.Add($a) } - # ── Step 6: Server Status ──────────────────────────────────────────────── + # -- Step 6: Server Status ------------------------------------------------ $worstSeverity = 0 $serverStatus = "Insufficient Data" @@ -1486,7 +1579,7 @@ function Main { } } - # ── Step 7: Critical Drive Alerts (fire-once) ──────────────────────────── + # -- Step 7: Critical Drive Alerts 
(fire-once) ---------------------------- $newCriticalDrives = [System.Collections.ArrayList]::new() @@ -1512,7 +1605,7 @@ function Main { Write-CriticalAlert -CriticalDrives $newCriticalDrives -Hostname $hostname } - # ── Step 8: Generate Report & Output ───────────────────────────────────── + # -- Step 8: Generate Report & Output ------------------------------------- # Console summary Write-Summary -Hostname $hostname -ServerStatus $serverStatus -AllAnalyses @($sortedAll) ` @@ -1533,7 +1626,7 @@ function Main { Write-VerboseLog "HTML report generated ($($htmlReport.Length) chars)" } - # ── Step 9: Finalize ───────────────────────────────────────────────────── + # -- Step 9: Finalize ----------------------------------------------------- # Log database stats $totalDrives = $allDrives.Count From d1c0ae9d068139929393f9623f94920a90cf9a76 Mon Sep 17 00:00:00 2001 From: Zach Boogher <129975920+AlrightLad@users.noreply.github.com> Date: Wed, 25 Feb 2026 23:12:18 -0500 Subject: [PATCH 8/9] Refactor drive metric handling and improve security Refactor drive metric insertion logic to use atomic DELETE and INSERT in a single batch, improving efficiency and preventing duplicate entries. Update HTML encoding for user-sourced values to enhance security. --- rmm-ninja/ServerGrowthTracking | 69 ++++++++++++++++------------------ 1 file changed, 33 insertions(+), 36 deletions(-) diff --git a/rmm-ninja/ServerGrowthTracking b/rmm-ninja/ServerGrowthTracking index e9907bd..13284cf 100644 --- a/rmm-ninja/ServerGrowthTracking +++ b/rmm-ninja/ServerGrowthTracking @@ -407,6 +407,9 @@ function Save-DriveRecord { } | Out-Null $newDrive = Get-DriveRecord -DeviceId $DeviceId -DriveLetter $DriveLetter + if (-not $newDrive) { + throw "Failed to retrieve newly inserted drive record for ${DeviceId}:${DriveLetter}" + } return $newDrive.id } } @@ -441,6 +444,7 @@ function Set-DailyMetric { .SYNOPSIS Inserts or updates the metric for a drive for the current calendar day. 
Prevents duplicate rows when the script runs more than once per day. + Uses atomic DELETE+INSERT in a single batch to avoid TOCTOU races. #> param( [int]$DriveId, @@ -459,32 +463,22 @@ function Set-DailyMetric { $dayStart = $parsed.ToString("yyyy-MM-dd") + "T00:00:00" $dayEnd = $parsed.AddDays(1).ToString("yyyy-MM-dd") + "T00:00:00" - $existing = Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" - SELECT id FROM metric - WHERE drive_id = @driveId AND timestamp >= @dayStart AND timestamp < @dayEnd - ORDER BY timestamp DESC LIMIT 1 + # Atomic: delete any existing same-day row then insert the new one, in a single statement batch. + # This runs within a single PSSQLite connection so both statements share one implicit transaction. + Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" + DELETE FROM metric + WHERE drive_id = @driveId AND timestamp >= @dayStart AND timestamp < @dayEnd; + INSERT INTO metric (drive_id, timestamp, used_gb, free_gb, usage_percent) + VALUES (@driveId, @ts, @used, @free, @pct); "@ -SqlParameters @{ driveId = $DriveId dayStart = $dayStart dayEnd = $dayEnd - } - - if ($existing) { - $updateId = if ($existing -is [array]) { $existing[0].id } else { $existing.id } - Invoke-SqliteQuery -DataSource $Script:DB_PATH -Query @" - UPDATE metric SET timestamp = @ts, used_gb = @used, free_gb = @free, usage_percent = @pct - WHERE id = @id -"@ -SqlParameters @{ - id = $updateId - ts = $Timestamp - used = $UsedGB - free = $FreeGB - pct = $UsagePercent - } | Out-Null - } - else { - Add-MetricRecord -DriveId $DriveId -Timestamp $Timestamp -UsedGB $UsedGB -FreeGB $FreeGB -UsagePercent $UsagePercent - } + ts = $Timestamp + used = $UsedGB + free = $FreeGB + pct = $UsagePercent + } | Out-Null } function Get-DriveMetrics { @@ -906,7 +900,7 @@ function Get-DriveAnalysis { Write-VerboseLog "Drive $($DriveRecord.drive_letter): slope=$([math]::Round($dailyGrowth, 4)) GB/day, R$([char]0x00B2)=$($regression.RSquared)" - $result.GBPerMonth = 
$monthlyGrowth.ToString("F3") + $result.GBPerMonth = $monthlyGrowth.ToString("F3", [System.Globalization.CultureInfo]::InvariantCulture) $currentFreeGB = $result.CurrentFreeGB $currentUsagePercent = $result.CurrentPercent @@ -919,7 +913,7 @@ function Get-DriveAnalysis { $daysUntilFull = $currentFreeGB / $dailyGrowth if ($daysUntilFull -gt $Script:DAYS_CAP) { $daysUntilFull = $Script:DAYS_CAP } $daysUntilFull = [math]::Round($daysUntilFull, 2) - $result.DaysUntilFull = $daysUntilFull.ToString("F2") + $result.DaysUntilFull = $daysUntilFull.ToString("F2", [System.Globalization.CultureInfo]::InvariantCulture) $result.NumericDays = $daysUntilFull } @@ -1026,8 +1020,9 @@ function Build-SvgChart { $legendItems = [System.Collections.ArrayList]::new() foreach ($analysis in $DriveAnalyses) { $color = $Script:CHART_COLORS[$legendItems.Count % $Script:CHART_COLORS.Count] - $label = ($analysis.Letter -replace ':$', '') - if ($analysis.DriveType -eq 'OS') { $label += " (OS)" } + $rawLabel = ($analysis.Letter -replace ':$', '') + if ($analysis.DriveType -eq 'OS') { $rawLabel += " (OS)" } + $label = [System.Net.WebUtility]::HtmlEncode($rawLabel) [void]$legendItems.Add(@{ Label = $label; Color = $color }) } @@ -1036,8 +1031,8 @@ function Build-SvgChart { $legendRows = 1 $testX = $mLeft foreach ($item in $legendItems) { - $testX += 65 + ($item.Label.Length * 2) - if ($testX -gt ($chartWidth - 80)) { + $testX += 28 + ($item.Label.Length * 6) + if ($testX -gt ($chartWidth - 20)) { $testX = $mLeft $legendRows++ } @@ -1128,8 +1123,8 @@ function Build-SvgChart { [void]$sb.Append("") [void]$sb.Append("$($item.Label)") - $legendX += 65 + ($item.Label.Length * 2) - if ($legendX -gt ($chartWidth - 80)) { + $legendX += 28 + ($item.Label.Length * 6) + if ($legendX -gt ($chartWidth - 20)) { $legendX = $mLeft $legendY += 16 } @@ -1178,13 +1173,15 @@ function Build-StorageReport { $now = (Get-Date).ToString("yyyy-MM-dd HH:mm") $driveCount = $AllAnalyses.Count - $totalPoints = ($AllAnalyses | 
ForEach-Object { $_.DataPoints } | Measure-Object -Sum).Sum + $measureResult = ($AllAnalyses | ForEach-Object { $_.DataPoints } | Measure-Object -Sum) + $totalPoints = if ($null -ne $measureResult.Sum) { [int]$measureResult.Sum } else { 0 } # Build SVG chart $svgChart = Build-SvgChart -DriveAnalyses $AllAnalyses - # HTML-encode user-sourced values to prevent injection via volume labels or hostname + # HTML-encode interpolated values to prevent injection via volume labels, hostname, or status $safeHostname = [System.Net.WebUtility]::HtmlEncode($Hostname) + $safeServerStatus = [System.Net.WebUtility]::HtmlEncode($ServerStatus) # Build summary table rows $tableRows = [System.Text.StringBuilder]::new() @@ -1200,7 +1197,7 @@ function Build-StorageReport { $baseStatus = $a.Status -replace '\s*\(Limited\)', '' $badge = $statusBadgeColors[$baseStatus] if (-not $badge) { $badge = $statusBadgeColors["Offline"] } - $statusDisplay = $a.Status + $statusDisplay = [System.Net.WebUtility]::HtmlEncode($a.Status) $statusHtml = "$statusDisplay" $sizeStr = if ($a.TotalSizeGB -gt 0) { "$([math]::Round($a.TotalSizeGB, 0)) GB" } else { '-' } @@ -1210,7 +1207,7 @@ function Build-StorageReport { $growthStr = $a.GBPerMonth $daysStr = $a.DaysUntilFull - $capStr = ([double]$Script:DAYS_CAP).ToString("F2") + $capStr = ([double]$Script:DAYS_CAP).ToString("F2", [System.Globalization.CultureInfo]::InvariantCulture) if ($daysStr -eq $capStr) { $capYears = [math]::Round($Script:DAYS_CAP / 365, 0) $daysStr = "${capYears}yr+" @@ -1243,7 +1240,7 @@ function Build-StorageReport {
$safeHostname
-
Storage: $ServerStatus | $now
+
Storage: $safeServerStatus | $now
$svgChart @@ -1279,7 +1276,7 @@ v$($Script:VERSION) | $driveCount drives | $totalPoints pts | OLS regression | S function Initialize-EventSource { try { if (-not [System.Diagnostics.EventLog]::SourceExists($Script:EVENT_SOURCE)) { - New-EventLog -LogName Application -Source $Script:EVENT_SOURCE -ErrorAction Stop + New-EventLog -LogName Application -Source $Script:EVENT_SOURCE -ErrorAction Stop | Out-Null Write-VerboseLog "Event log source '$($Script:EVENT_SOURCE)' registered" } } From cce946559c72f95ae9a3c0c908516aaa249406cd Mon Sep 17 00:00:00 2001 From: Zach Boogher <129975920+AlrightLad@users.noreply.github.com> Date: Thu, 26 Mar 2026 01:52:54 -0400 Subject: [PATCH 9/9] Fix formatting in ServerGrowthTracking