");
- sb.Append($"");
+ sb.Append($"");
sb.Append($"Sent by {WebUtility.HtmlEncode(EditionName)}");
if (!isTest)
{
@@ -200,7 +200,7 @@ private static void AppendDetailSection(StringBuilder sb, AlertContext context)
/* Separator + heading */
sb.Append(" ");
sb.Append("");
- sb.Append($"RECENT EVENTS ");
+ sb.Append($"RECENT EVENTS ");
sb.Append(" ");
foreach (var item in context.Details)
diff --git a/Dashboard/Services/PlanAnalyzer.cs b/Dashboard/Services/PlanAnalyzer.cs
index 30b9343..6965bbb 100644
--- a/Dashboard/Services/PlanAnalyzer.cs
+++ b/Dashboard/Services/PlanAnalyzer.cs
@@ -38,23 +38,78 @@ public static void Analyze(ParsedPlan plan)
private static void AnalyzeStatement(PlanStatement stmt)
{
// Rule 3: Serial plan with reason
- if (!string.IsNullOrEmpty(stmt.NonParallelPlanReason))
+ // Skip: trivial cost (< 0.01), TRIVIAL optimization (can't go parallel anyway),
+ // and 0ms actual elapsed time (not worth flagging).
+ if (!string.IsNullOrEmpty(stmt.NonParallelPlanReason)
+ && stmt.StatementSubTreeCost >= 0.01
+ && stmt.StatementOptmLevel != "TRIVIAL"
+ && !(stmt.QueryTimeStats != null && stmt.QueryTimeStats.ElapsedTimeMs == 0))
{
var reason = stmt.NonParallelPlanReason switch
{
+ // User/config forced serial
"MaxDOPSetToOne" => "MAXDOP is set to 1",
+ "QueryHintNoParallelSet" => "OPTION (MAXDOP 1) hint forces serial execution",
+ "ParallelismDisabledByTraceFlag" => "Parallelism disabled by trace flag",
+
+ // Passive — optimizer chose serial, nothing wrong
"EstimatedDOPIsOne" => "Estimated DOP is 1 (the plan's estimated cost was below the cost threshold for parallelism)",
+
+ // Edition/environment limitations
"NoParallelPlansInDesktopOrExpressEdition" => "Express/Desktop edition does not support parallelism",
+ "NoParallelCreateIndexInNonEnterpriseEdition" => "Parallel index creation requires Enterprise edition",
+ "NoParallelPlansDuringUpgrade" => "Parallel plans disabled during upgrade",
+ "NoParallelForPDWCompilation" => "Parallel plans not supported for PDW compilation",
+ "NoParallelForCloudDBReplication" => "Parallel plans not supported during cloud DB replication",
+
+ // Query constructs that block parallelism (actionable)
"CouldNotGenerateValidParallelPlan" => "Optimizer could not generate a valid parallel plan. Common causes: scalar UDFs, inserts into table variables, certain system functions, or OPTION (MAXDOP 1) hints",
- "QueryHintNoParallelSet" => "OPTION (MAXDOP 1) hint forces serial execution",
+ "TSQLUserDefinedFunctionsNotParallelizable" => "T-SQL scalar UDF prevents parallelism. Rewrite as an inline table-valued function, or on SQL Server 2019+ check if the UDF is eligible for automatic inlining",
+ "CLRUserDefinedFunctionRequiresDataAccess" => "CLR UDF with data access prevents parallelism",
+ "NonParallelizableIntrinsicFunction" => "Non-parallelizable intrinsic function in the query",
+ "TableVariableTransactionsDoNotSupportParallelNestedTransaction" => "Table variable transaction prevents parallelism. Consider using a #temp table instead",
+ "UpdatingWritebackVariable" => "Updating a writeback variable prevents parallelism",
+ "DMLQueryReturnsOutputToClient" => "DML with OUTPUT clause returning results to client prevents parallelism",
+ "MixedSerialAndParallelOnlineIndexBuildNotSupported" => "Mixed serial/parallel online index build not supported",
+ "NoRangesResumableCreate" => "Resumable index create cannot use parallelism for this operation",
+
+ // Cursor limitations
+ "NoParallelCursorFetchByBookmark" => "Cursor fetch by bookmark cannot use parallelism",
+ "NoParallelDynamicCursor" => "Dynamic cursors cannot use parallelism",
+ "NoParallelFastForwardCursor" => "Fast-forward cursors cannot use parallelism",
+
+ // Memory-optimized / natively compiled
+ "NoParallelForMemoryOptimizedTables" => "Memory-optimized tables do not support parallel plans",
+ "NoParallelForDmlOnMemoryOptimizedTable" => "DML on memory-optimized tables cannot use parallelism",
+ "NoParallelForNativelyCompiledModule" => "Natively compiled modules do not support parallelism",
+
+ // Remote queries
+ "NoParallelWithRemoteQuery" => "Remote queries cannot use parallelism",
+ "NoRemoteParallelismForMatrix" => "Remote parallelism not available for this query shape",
+
_ => stmt.NonParallelPlanReason
};
+ var isActionable = stmt.NonParallelPlanReason is
+ "MaxDOPSetToOne" or "QueryHintNoParallelSet" or "ParallelismDisabledByTraceFlag"
+ or "CouldNotGenerateValidParallelPlan"
+ or "TSQLUserDefinedFunctionsNotParallelizable"
+ or "CLRUserDefinedFunctionRequiresDataAccess"
+ or "NonParallelizableIntrinsicFunction"
+ or "TableVariableTransactionsDoNotSupportParallelNestedTransaction"
+ or "UpdatingWritebackVariable"
+ or "DMLQueryReturnsOutputToClient"
+ or "NoParallelCursorFetchByBookmark"
+ or "NoParallelDynamicCursor"
+ or "NoParallelFastForwardCursor"
+ or "NoParallelWithRemoteQuery"
+ or "NoRemoteParallelismForMatrix";
+
stmt.PlanWarnings.Add(new PlanWarning
{
WarningType = "Serial Plan",
Message = $"Query running serially: {reason}.",
- Severity = PlanWarningSeverity.Warning
+ Severity = isActionable ? PlanWarningSeverity.Warning : PlanWarningSeverity.Info
});
}
@@ -140,7 +195,7 @@ private static void AnalyzeStatement(PlanStatement stmt)
stmt.PlanWarnings.Add(new PlanWarning
{
WarningType = "UDF Execution",
- Message = $"Scalar UDF cost in this statement: {stmt.QueryUdfElapsedTimeMs:N0}ms elapsed, {stmt.QueryUdfCpuTimeMs:N0}ms CPU. Scalar UDFs run once per row and prevent parallelism. Rewrite as an inline table-valued function, or dump results to a #temp table and apply the UDF only to the final result set.",
+ Message = $"Scalar UDF cost in this statement: {stmt.QueryUdfElapsedTimeMs:N0}ms elapsed, {stmt.QueryUdfCpuTimeMs:N0}ms CPU. Scalar UDFs run once per row and prevent parallelism. Options: rewrite as an inline table-valued function, assign the result to a variable if only one row is needed, dump results to a #temp table and apply the UDF to the final result set, or on SQL Server 2019+ check if the UDF is eligible for automatic scalar UDF inlining.",
Severity = stmt.QueryUdfElapsedTimeMs >= 1000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
@@ -148,7 +203,8 @@ private static void AnalyzeStatement(PlanStatement stmt)
// Rule 20: Local variables without RECOMPILE
// Parameters with no CompiledValue are likely local variables — the optimizer
// cannot sniff their values and uses density-based ("unknown") estimates.
- if (stmt.Parameters.Count > 0)
+ // Skip trivial statements (simple variable assignments) where estimate quality doesn't matter.
+ if (stmt.Parameters.Count > 0 && stmt.StatementSubTreeCost >= 0.01)
{
var unsnifffedParams = stmt.Parameters
.Where(p => string.IsNullOrEmpty(p.CompiledValue))
@@ -352,21 +408,42 @@ private static void AnalyzeNode(PlanNode node, PlanStatement stmt)
{
// Rule 1: Filter operators — rows survived the tree just to be discarded
// Quantify the impact by summing child subtree cost (reads, CPU, time).
- if (node.PhysicalOp == "Filter" && !string.IsNullOrEmpty(node.Predicate))
+ // Suppress when the filter's child subtree is trivial (low I/O, fast, cheap).
+ if (node.PhysicalOp == "Filter" && !string.IsNullOrEmpty(node.Predicate)
+ && node.Children.Count > 0)
{
- var impact = QuantifyFilterImpact(node);
- var predicate = Truncate(node.Predicate, 200);
- var message = "Filter operator discarding rows late in the plan.";
- if (!string.IsNullOrEmpty(impact))
- message += $"\n{impact}";
- message += $"\nPredicate: {predicate}";
+ // Gate: skip trivial filters based on actual stats or estimated cost
+ bool isTrivial;
+ if (node.HasActualStats)
+ {
+ long childReads = 0;
+ foreach (var child in node.Children)
+ childReads += SumSubtreeReads(child);
+ var childElapsed = node.Children.Max(c => c.ActualElapsedMs);
+ isTrivial = childReads < 128 && childElapsed < 10;
+ }
+ else
+ {
+ var childCost = node.Children.Sum(c => c.EstimatedTotalSubtreeCost);
+ isTrivial = childCost < 1.0;
+ }
- node.Warnings.Add(new PlanWarning
+ if (!isTrivial)
{
- WarningType = "Filter Operator",
- Message = message,
- Severity = PlanWarningSeverity.Warning
- });
+ var impact = QuantifyFilterImpact(node);
+ var predicate = Truncate(node.Predicate, 200);
+ var message = "Filter operator discarding rows late in the plan.";
+ if (!string.IsNullOrEmpty(impact))
+ message += $"\n{impact}";
+ message += $"\nPredicate: {predicate}";
+
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Filter Operator",
+ Message = message,
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
}
// Rule 2: Eager Index Spools — optimizer building temporary indexes on the fly
@@ -391,7 +468,7 @@ private static void AnalyzeNode(PlanNode node, PlanStatement stmt)
node.Warnings.Add(new PlanWarning
{
WarningType = "UDF Execution",
- Message = $"Scalar UDF executing on this operator ({node.UdfElapsedTimeMs:N0}ms elapsed, {node.UdfCpuTimeMs:N0}ms CPU). Scalar UDFs run once per row and prevent parallelism. Rewrite as an inline table-valued function, or dump the query results to a #temp table first and apply the UDF only to the final result set.",
+ Message = $"Scalar UDF executing on this operator ({node.UdfElapsedTimeMs:N0}ms elapsed, {node.UdfCpuTimeMs:N0}ms CPU). Scalar UDFs run once per row and prevent parallelism. Options: rewrite as an inline table-valued function, assign the result to a variable if only one row is needed, dump results to a #temp table and apply the UDF to the final result set, or on SQL Server 2019+ check if the UDF is eligible for automatic scalar UDF inlining.",
Severity = node.UdfElapsedTimeMs >= 1000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
@@ -451,7 +528,7 @@ private static void AnalyzeNode(PlanNode node, PlanStatement stmt)
node.Warnings.Add(new PlanWarning
{
WarningType = "Scalar UDF",
- Message = $"Scalar {type} UDF: {udf.FunctionName}. Scalar UDFs run once per row and prevent parallelism. Rewrite as an inline table-valued function, or dump results to a #temp table and apply the UDF only to the final result set.",
+ Message = $"Scalar {type} UDF: {udf.FunctionName}. Scalar UDFs run once per row and prevent parallelism. Options: rewrite as an inline table-valued function, assign the result to a variable if only one row is needed, dump results to a #temp table and apply the UDF to the final result set, or on SQL Server 2019+ check if the UDF is eligible for automatic scalar UDF inlining.",
Severity = PlanWarningSeverity.Warning
});
}
@@ -830,12 +907,17 @@ _ when nonSargableReason.StartsWith("Function call", StringComparison.OrdinalIgn
node.EstimateRowsWithoutRowGoal > node.EstimateRows)
{
var reduction = node.EstimateRowsWithoutRowGoal / node.EstimateRows;
- node.Warnings.Add(new PlanWarning
+ // Require at least a 2x reduction to be worth mentioning — "1 to 1" or
+ // tiny floating-point differences that display identically are noise
+ if (reduction >= 2.0)
{
- WarningType = "Row Goal",
- Message = $"Row goal active: estimate reduced from {node.EstimateRowsWithoutRowGoal:N0} to {node.EstimateRows:N0} ({reduction:N0}x reduction) due to TOP, EXISTS, IN, or FAST hint. The optimizer chose this plan shape expecting to stop reading early. If the query reads all rows anyway, the plan choice may be suboptimal.",
- Severity = PlanWarningSeverity.Info
- });
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Row Goal",
+ Message = $"Row goal active: estimate reduced from {node.EstimateRowsWithoutRowGoal:N0} to {node.EstimateRows:N0} ({reduction:N0}x reduction) due to TOP, EXISTS, IN, or FAST hint. The optimizer chose this plan shape expecting to stop reading early. If the query reads all rows anyway, the plan choice may be suboptimal.",
+ Severity = PlanWarningSeverity.Info
+ });
+ }
}
// Rule 28: Row Count Spool — NOT IN with nullable column
@@ -1067,6 +1149,13 @@ private static bool IsOrExpansionChain(PlanNode concatenationNode)
if (parent == null || parent.PhysicalOp != "Nested Loops")
return false;
+ // If this Nested Loops is inside an Anti/Semi Join, this is a NOT IN/IN
+ // subquery pattern (Merge Interval optimizing range lookups), not an OR expansion
+ var nlParent = parent.Parent;
+ if (nlParent != null && nlParent.LogicalOp != null &&
+ nlParent.LogicalOp.Contains("Semi"))
+ return false;
+
return true;
}
diff --git a/Dashboard/Services/ServerManager.cs b/Dashboard/Services/ServerManager.cs
index 1364926..d368664 100644
--- a/Dashboard/Services/ServerManager.cs
+++ b/Dashboard/Services/ServerManager.cs
@@ -15,6 +15,7 @@
using System.Threading.Tasks;
using System.Windows;
using Microsoft.Data.SqlClient;
+using Installer.Core;
using PerformanceMonitorDashboard.Helpers;
using PerformanceMonitorDashboard.Interfaces;
using PerformanceMonitorDashboard.Models;
@@ -323,6 +324,36 @@ CASE WHEN DB_ID('rdsadmin') IS NOT NULL THEN 1 ELSE 0 END AS is_aws_rds
{
Logger.Info($"Connectivity check passed for server '{server.DisplayName}'");
status.UserCancelledMfa = false; // Clear any previous cancellation flag
+
+ /* Query installed PerformanceMonitor version */
+ try
+ {
+ using var versionCmd = new SqlCommand(@"
+ IF DB_ID(N'PerformanceMonitor') IS NOT NULL
+ AND EXISTS (
+ SELECT 1
+ FROM PerformanceMonitor.sys.tables AS t
+ JOIN PerformanceMonitor.sys.schemas AS s
+ ON t.schema_id = s.schema_id
+ WHERE s.name = N'config'
+ AND t.name = N'installation_history'
+ )
+ BEGIN
+ SELECT TOP (1)
+ installer_version
+ FROM PerformanceMonitor.config.installation_history
+ WHERE installation_status = N'SUCCESS'
+ ORDER BY installation_date DESC;
+ END;", connection);
+ versionCmd.CommandTimeout = ConnectionCheckTimeoutSeconds;
+ var versionResult = await versionCmd.ExecuteScalarAsync();
+ status.InstalledMonitorVersion = versionResult is string v ? v : null;
+ }
+ catch (SqlException)
+ {
+ /* Non-critical — don't fail the connectivity check */
+ status.InstalledMonitorVersion = null;
+ }
}
}
catch (SqlException ex)
@@ -383,6 +414,17 @@ public async Task CheckAllConnectionsAsync()
await Task.WhenAll(tasks);
}
+ public async Task GetInstalledVersionAsync(ServerConnection server)
+ {
+ var connectionString = server.GetConnectionString(_credentialService);
+ var builder = new SqlConnectionStringBuilder(connectionString)
+ {
+ InitialCatalog = "master",
+ ConnectTimeout = ConnectionCheckTimeoutSeconds
+ };
+ return await InstallationService.GetInstalledVersionAsync(builder.ConnectionString);
+ }
+
private void LoadServers()
{
if (!File.Exists(_configFilePath))
diff --git a/Dashboard/Services/ShowPlanParser.cs b/Dashboard/Services/ShowPlanParser.cs
index 0066879..f441db9 100644
--- a/Dashboard/Services/ShowPlanParser.cs
+++ b/Dashboard/Services/ShowPlanParser.cs
@@ -972,6 +972,22 @@ private static PlanNode ParseRelOp(XElement relOpEl)
if (actionColEl != null)
node.ActionColumn = FormatColumnRef(actionColEl);
+ // Nonclustered indexes maintained by modification operators (Update/SimpleUpdate/CreateIndex)
+ var opName = physicalOpEl.Name.LocalName;
+ if (opName is "Update" or "SimpleUpdate" or "CreateIndex")
+ {
+ var ncObjects = ScopedDescendants(physicalOpEl, Ns + "Object")
+ .Where(o => string.Equals(o.Attribute("IndexKind")?.Value, "NonClustered", StringComparison.OrdinalIgnoreCase))
+ .ToList();
+ node.NonClusteredIndexCount = ncObjects.Count;
+ foreach (var ncObj in ncObjects)
+ {
+ var ixName = ncObj.Attribute("Index")?.Value?.Replace("[", "").Replace("]", "");
+ if (!string.IsNullOrEmpty(ixName))
+ node.NonClusteredIndexNames.Add(ixName);
+ }
+ }
+
// SET predicate (UPDATE operator)
var setPredicateEl = physicalOpEl.Element(Ns + "SetPredicate");
if (setPredicateEl != null)
@@ -1616,7 +1632,8 @@ private static List ParseWarningsFromElement(XElement warningsEl)
});
}
- // Memory grant warning
+ // Memory grant warning (from plan XML) — gate at 1 GB to avoid noise on small grants
+ // All values are in KB, consistent with MemoryGrantInfo element
var memWarnEl = warningsEl.Element(Ns + "MemoryGrantWarning");
if (memWarnEl != null)
{
@@ -1624,12 +1641,17 @@ private static List ParseWarningsFromElement(XElement warningsEl)
var requested = ParseLong(memWarnEl.Attribute("RequestedMemory")?.Value);
var granted = ParseLong(memWarnEl.Attribute("GrantedMemory")?.Value);
var maxUsed = ParseLong(memWarnEl.Attribute("MaxUsedMemory")?.Value);
- result.Add(new PlanWarning
+ if (granted >= 1048576) // 1 GB in KB
{
- WarningType = "Memory Grant",
- Message = $"{kind}: Requested {requested:N0} KB, Granted {granted:N0} KB, Used {maxUsed:N0} KB",
- Severity = PlanWarningSeverity.Warning
- });
+ var grantedMB = granted / 1024.0;
+ var usedMB = maxUsed / 1024.0;
+ result.Add(new PlanWarning
+ {
+ WarningType = "Memory Grant",
+ Message = $"{kind}: Granted {grantedMB:N0} MB, Used {usedMB:N0} MB",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
}
// Implicit conversions
diff --git a/Dashboard/Themes/CoolBreezeTheme.xaml b/Dashboard/Themes/CoolBreezeTheme.xaml
index ab50dea..4eb7002 100644
--- a/Dashboard/Themes/CoolBreezeTheme.xaml
+++ b/Dashboard/Themes/CoolBreezeTheme.xaml
@@ -22,7 +22,7 @@
#1A2A3A
- #364D61
+ #1A2A3A
#5B7A90
diff --git a/Dashboard/Themes/DarkTheme.xaml b/Dashboard/Themes/DarkTheme.xaml
index 4451911..73cc58c 100644
--- a/Dashboard/Themes/DarkTheme.xaml
+++ b/Dashboard/Themes/DarkTheme.xaml
@@ -22,7 +22,7 @@
#E4E6EB
- #9DA5B4
+ #E4E6EB
#6B7280
@@ -1265,7 +1265,7 @@
-
+
diff --git a/Dashboard/Themes/LightTheme.xaml b/Dashboard/Themes/LightTheme.xaml
index 882619f..b2fff24 100644
--- a/Dashboard/Themes/LightTheme.xaml
+++ b/Dashboard/Themes/LightTheme.xaml
@@ -22,7 +22,7 @@
#1A1D23
- #4A5568
+ #1A1D23
#718096
diff --git a/Installer.Core/Installer.Core.csproj b/Installer.Core/Installer.Core.csproj
index 6c4ca00..fbf0387 100644
--- a/Installer.Core/Installer.Core.csproj
+++ b/Installer.Core/Installer.Core.csproj
@@ -7,10 +7,10 @@
Installer.Core
Installer.Core
SQL Server Performance Monitor Installer Core
- 2.5.0
- 2.5.0.0
- 2.5.0.0
- 2.5.0
+ 2.6.0
+ 2.6.0.0
+ 2.6.0.0
+ 2.6.0
Darling Data, LLC
Copyright (c) 2026 Darling Data, LLC
true
diff --git a/Installer.Core/Patterns.cs b/Installer.Core/Patterns.cs
index 4e5c7fe..081b692 100644
--- a/Installer.Core/Patterns.cs
+++ b/Installer.Core/Patterns.cs
@@ -27,6 +27,14 @@ public static partial class Patterns
@"^\s*GO\s*(?:--[^\r\n]*)?\s*$",
RegexOptions.Compiled | RegexOptions.Multiline | RegexOptions.IgnoreCase);
+ /// <summary>
+ /// Matches MSBuild-mangled upgrade folder names from embedded resource names.
+ /// MSBuild converts "2.2.0-to-2.3.0" to "_2._2._0_to_2._3._0" (dots become namespace
+ /// separators, hyphens become underscores, digit-leading segments get underscore prefix).
+ /// </summary>
+ [GeneratedRegex(@"^(_\d+\._\d+\._\d+_to_\d+\._\d+\._\d+)\.")]
+ public static partial Regex EmbeddedUpgradeFolderPattern();
+
///
/// Prefixes that indicate excluded scripts (uninstall, test, troubleshooting).
///
diff --git a/Installer.Core/ScriptProvider.cs b/Installer.Core/ScriptProvider.cs
index d07219d..b136226 100644
--- a/Installer.Core/ScriptProvider.cs
+++ b/Installer.Core/ScriptProvider.cs
@@ -330,15 +330,28 @@ public override List GetApplicableUpgrades(
{
string upgradesPrefix = $"{_resourcePrefix}.Resources.upgrades.";
- var folderNames = _assembly.GetManifestResourceNames()
+ /*
+ MSBuild mangles embedded resource names: folder "2.2.0-to-2.3.0" becomes
+ "_2._2._0_to_2._3._0" (dots → namespace separators, hyphens → underscores,
+ digit-leading segments → underscore prefix). Extract the mangled name and
+ recover the original for version parsing. Store mangled name in Path for
+ resource lookups; original in FolderName for display/version parsing.
+ */
+ var mangledNames = _assembly.GetManifestResourceNames()
.Where(r => r.StartsWith(upgradesPrefix, StringComparison.Ordinal))
.Select(r => r[upgradesPrefix.Length..])
- .Select(r => r.Split('.')[0])
+ .Select(r => Patterns.EmbeddedUpgradeFolderPattern().Match(r))
+ .Where(m => m.Success)
+ .Select(m => m.Groups[1].Value)
.Distinct()
.ToList();
- var allUpgrades = folderNames
- .Select(f => ParseUpgradeFolderName(f, f))
+ var allUpgrades = mangledNames
+ .Select(mangled =>
+ {
+ string original = UnmangleUpgradeFolderName(mangled);
+ return ParseUpgradeFolderName(original, mangled);
+ })
.Where(x => x != null)
.Cast()
.ToList();
@@ -348,7 +361,7 @@ public override List GetApplicableUpgrades(
var result = new List();
foreach (var upgrade in filtered)
{
- string manifestResource = $"{upgradesPrefix}{upgrade.FolderName}.upgrade.txt";
+ string manifestResource = $"{upgradesPrefix}{upgrade.Path}.upgrade.txt";
if (_assembly.GetManifestResourceNames().Contains(manifestResource))
{
result.Add(upgrade);
@@ -364,7 +377,7 @@ public override List GetApplicableUpgrades(
public override List GetUpgradeManifest(UpgradeInfo upgrade)
{
string upgradesPrefix = $"{_resourcePrefix}.Resources.upgrades.";
- string manifestResource = $"{upgradesPrefix}{upgrade.FolderName}.upgrade.txt";
+ string manifestResource = $"{upgradesPrefix}{upgrade.Path}.upgrade.txt";
string content = ReadResource(manifestResource);
return ParseUpgradeManifest(content.Split('\n'));
}
@@ -372,7 +385,7 @@ public override List GetUpgradeManifest(UpgradeInfo upgrade)
public override string ReadUpgradeScript(UpgradeInfo upgrade, string scriptName)
{
string upgradesPrefix = $"{_resourcePrefix}.Resources.upgrades.";
- string resource = $"{upgradesPrefix}{upgrade.FolderName}.{scriptName}";
+ string resource = $"{upgradesPrefix}{upgrade.Path}.{scriptName}";
return ReadResource(resource);
}
@@ -383,10 +396,34 @@ public override Task ReadUpgradeScriptAsync(
public override bool UpgradeScriptExists(UpgradeInfo upgrade, string scriptName)
{
string upgradesPrefix = $"{_resourcePrefix}.Resources.upgrades.";
- string resource = $"{upgradesPrefix}{upgrade.FolderName}.{scriptName}";
+ string resource = $"{upgradesPrefix}{upgrade.Path}.{scriptName}";
return _assembly.GetManifestResourceNames().Contains(resource);
}
+ /// <summary>
+ /// Reverses MSBuild's resource name mangling for upgrade folder names.
+ /// "_2._2._0_to_2._3._0" → "2.2.0-to-2.3.0"
+ /// </summary>
+ private static string UnmangleUpgradeFolderName(string mangled)
+ {
+ /*
+ MSBuild mangling:
+ - dots in folder names become namespace separator dots
+ - hyphens become underscores
+ - segments starting with a digit get an underscore prefix
+ Reverse: remove leading underscores from digit segments,
+ rejoin with dots, then restore the hyphen in "-to-".
+ */
+ var segments = mangled.Split('.');
+ for (int i = 0; i < segments.Length; i++)
+ {
+ if (segments[i].Length > 1 && segments[i][0] == '_' && char.IsDigit(segments[i][1]))
+ segments[i] = segments[i][1..];
+ }
+ string result = string.Join(".", segments);
+ return result.Replace("_to_", "-to-");
+ }
+
public override string? ReadTroubleshootingScript()
{
string resource = $"{_resourcePrefix}.Resources.install.99_installer_troubleshooting.sql";
diff --git a/Installer.Tests/UpgradeOrderingTests.cs b/Installer.Tests/UpgradeOrderingTests.cs
index eace56b..2040070 100644
--- a/Installer.Tests/UpgradeOrderingTests.cs
+++ b/Installer.Tests/UpgradeOrderingTests.cs
@@ -1,3 +1,4 @@
+using System.Text.RegularExpressions;
using Installer.Core;
using Installer.Core.Models;
using Installer.Tests.Helpers;
@@ -146,4 +147,38 @@ public void DoesNotIncludeFutureUpgrades()
Assert.Equal(2, upgrades.Count);
Assert.DoesNotContain(upgrades, u => u.FolderName == "2.2.0-to-2.3.0");
}
+
+ [Fact]
+ public void EmbeddedResources_FindsUpgradeFolders()
+ {
+ // Regression test for #772: MSBuild mangles embedded resource names
+ // (e.g., "2.2.0-to-2.3.0" → "_2._2._0_to_2._3._0"), which broke
+ // upgrade discovery when using Split('.')[0].
+ var provider = ScriptProvider.FromEmbeddedResources();
+ var upgrades = provider.GetApplicableUpgrades("2.2.0", "2.5.0");
+
+ Assert.NotEmpty(upgrades);
+ Assert.Contains(upgrades, u => u.FolderName == "2.2.0-to-2.3.0");
+ }
+
+ [Theory]
+ [InlineData("_2._2._0_to_2._3._0.upgrade.txt", "_2._2._0_to_2._3._0")]
+ [InlineData("_2._2._0_to_2._3._0.03_add_growth_vlf_columns.sql", "_2._2._0_to_2._3._0")]
+ [InlineData("_10._1._0_to_10._2._0.01_schema.sql", "_10._1._0_to_10._2._0")]
+ public void EmbeddedUpgradeFolderPattern_ExtractsMangledName(string resourceSuffix, string expectedMangled)
+ {
+ var match = Patterns.EmbeddedUpgradeFolderPattern().Match(resourceSuffix);
+
+ Assert.True(match.Success);
+ Assert.Equal(expectedMangled, match.Groups[1].Value);
+ }
+
+ [Theory]
+ [InlineData("not-a-version")]
+ [InlineData("readme.txt")]
+ [InlineData("README.md")]
+ public void EmbeddedUpgradeFolderPattern_RejectsNonVersionStrings(string input)
+ {
+ Assert.False(Patterns.EmbeddedUpgradeFolderPattern().Match(input).Success);
+ }
}
diff --git a/Installer/PerformanceMonitorInstaller.csproj b/Installer/PerformanceMonitorInstaller.csproj
index 2f58b54..7c3fa1a 100644
--- a/Installer/PerformanceMonitorInstaller.csproj
+++ b/Installer/PerformanceMonitorInstaller.csproj
@@ -20,10 +20,10 @@
PerformanceMonitorInstaller
SQL Server Performance Monitor Installer
- 2.5.0
- 2.5.0.0
- 2.5.0.0
- 2.5.0
+ 2.6.0
+ 2.6.0.0
+ 2.6.0.0
+ 2.6.0
Darling Data, LLC
Copyright © 2026 Darling Data, LLC
Installation utility for SQL Server Performance Monitor - Supports SQL Server 2016-2025
diff --git a/Installer/Program.cs b/Installer/Program.cs
index daa4cb9..bbb487a 100644
--- a/Installer/Program.cs
+++ b/Installer/Program.cs
@@ -100,12 +100,13 @@ static async Task Main(string[] args)
}
}
- /*Parse encryption option (default: Mandatory)*/
- var encryptArg = args.FirstOrDefault(a => a.StartsWith("--encrypt=", StringComparison.OrdinalIgnoreCase));
+ /*Parse encryption option (default: Mandatory)
+ Supports both --encrypt=optional and --encrypt optional */
string encryptionLevel = "Mandatory";
- if (encryptArg != null)
+ var encryptEqualsArg = args.FirstOrDefault(a => a.StartsWith("--encrypt=", StringComparison.OrdinalIgnoreCase));
+ if (encryptEqualsArg != null)
{
- string encryptValue = encryptArg.Substring("--encrypt=".Length).ToLowerInvariant();
+ string encryptValue = encryptEqualsArg.Substring("--encrypt=".Length).ToLowerInvariant();
encryptionLevel = encryptValue switch
{
"optional" => "Optional",
@@ -113,21 +114,38 @@ static async Task Main(string[] args)
_ => "Mandatory"
};
}
+ else
+ {
+ int encryptIndex = Array.FindIndex(args, a => a.Equals("--encrypt", StringComparison.OrdinalIgnoreCase));
+ if (encryptIndex >= 0 && encryptIndex + 1 < args.Length && !args[encryptIndex + 1].StartsWith("--", StringComparison.Ordinal))
+ {
+ encryptionLevel = args[encryptIndex + 1].ToLowerInvariant() switch
+ {
+ "optional" => "Optional",
+ "strict" => "Strict",
+ _ => "Mandatory"
+ };
+ }
+ }
- /*Filter out option flags and --entra to get positional arguments*/
- var filteredArgsList = args
- .Where(a => !a.Equals("--reinstall", StringComparison.OrdinalIgnoreCase))
- .Where(a => !a.Equals("--uninstall", StringComparison.OrdinalIgnoreCase))
- .Where(a => !a.Equals("--reset-schedule", StringComparison.OrdinalIgnoreCase))
- .Where(a => !a.Equals("--trust-cert", StringComparison.OrdinalIgnoreCase))
- .Where(a => !a.StartsWith("--encrypt=", StringComparison.OrdinalIgnoreCase))
- .Where(a => !a.Equals("--entra", StringComparison.OrdinalIgnoreCase))
- .ToList();
-
- /*Remove the entra email from positional args if present*/
- if (entraEmail != null)
+ /*Filter out all --flags and their trailing values to get positional arguments
+ (server, username, password). Flags like --entra <email> and --encrypt <level>
+ have a following value that must also be removed.*/
+ var filteredArgsList = new List();
+ for (int i = 0; i < args.Length; i++)
{
- filteredArgsList.Remove(entraEmail);
+ if (args[i].StartsWith("--", StringComparison.Ordinal))
+ {
+ /*Skip flags that take a trailing value (--entra <email>, --encrypt <level>)*/
+ if ((args[i].Equals("--entra", StringComparison.OrdinalIgnoreCase)
+ || args[i].Equals("--encrypt", StringComparison.OrdinalIgnoreCase))
+ && i + 1 < args.Length && !args[i + 1].StartsWith("--", StringComparison.Ordinal))
+ {
+ i++; /*skip the value too*/
+ }
+ continue;
+ }
+ filteredArgsList.Add(args[i]);
}
var filteredArgs = filteredArgsList.ToArray();
@@ -231,6 +249,24 @@ Automated mode with command-line arguments
return (int)InstallationResultCode.InvalidArguments;
}
+ Console.Write("Trust server certificate? (Y/N, default Y): ");
+ string? trustResponse = Console.ReadLine()?.Trim();
+ trustCert = string.IsNullOrWhiteSpace(trustResponse)
+ || trustResponse.Equals("Y", StringComparison.OrdinalIgnoreCase);
+
+ Console.WriteLine("Encryption level:");
+ Console.WriteLine(" [O] Optional (default)");
+ Console.WriteLine(" [M] Mandatory");
+ Console.WriteLine(" [S] Strict");
+ Console.Write("Choice (O/M/S, default O): ");
+ string? encryptResponse = Console.ReadLine()?.Trim();
+ encryptionLevel = encryptResponse?.ToUpperInvariant() switch
+ {
+ "M" => "Mandatory",
+ "S" => "Strict",
+ _ => "Optional"
+ };
+
Console.WriteLine("Authentication type:");
Console.WriteLine(" [W] Windows Authentication (default)");
Console.WriteLine(" [S] SQL Server Authentication");
diff --git a/InstallerGui/App.xaml b/InstallerGui/App.xaml
index 7c9deaa..f28634d 100644
--- a/InstallerGui/App.xaml
+++ b/InstallerGui/App.xaml
@@ -14,7 +14,7 @@
-
+
diff --git a/InstallerGui/Themes/DarkTheme.xaml b/InstallerGui/Themes/DarkTheme.xaml
index 2b9092e..7e8d026 100644
--- a/InstallerGui/Themes/DarkTheme.xaml
+++ b/InstallerGui/Themes/DarkTheme.xaml
@@ -17,7 +17,7 @@
-
+
diff --git a/Lite.Tests/AnomalyDetectorTests.cs b/Lite.Tests/AnomalyDetectorTests.cs
new file mode 100644
index 0000000..1efdab9
--- /dev/null
+++ b/Lite.Tests/AnomalyDetectorTests.cs
@@ -0,0 +1,493 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Threading.Tasks;
+using DuckDB.NET.Data;
+using PerformanceMonitorLite.Analysis;
+using PerformanceMonitorLite.Database;
+using Xunit;
+
+namespace PerformanceMonitorLite.Tests;
+
+/// <summary>
+/// Tests for the upgraded AnomalyDetector: time-bucketed baselines, new detection
+/// methods (batch requests, sessions, query duration, memory), per-metric thresholds,
+/// and baseline context metadata.
+/// </summary>
+public class AnomalyDetectorTests : IDisposable
+{
+ private readonly string _tempDir;
+ private readonly DuckDbInitializer _duckDb;
+ private readonly BaselineProvider _baselineProvider;
+ private readonly AnomalyDetector _detector;
+
+ private const int ServerId = -999;
+ private const string ServerName = "TestServer";
+
+ // Fixed timestamps for deterministic testing
+ private static readonly DateTime _now = DateTime.UtcNow;
+ private static readonly DateTime _analysisEnd = _now;
+ private static readonly DateTime _analysisStart = _now.AddHours(-4);
+
+ private long _nextId = -1;
+
+ public AnomalyDetectorTests()
+ {
+ _tempDir = Path.Combine(Path.GetTempPath(), "AnomalyTests_" + Guid.NewGuid().ToString("N")[..8]);
+ Directory.CreateDirectory(_tempDir);
+ var dbPath = Path.Combine(_tempDir, "test.duckdb");
+ _duckDb = new DuckDbInitializer(dbPath);
+ _baselineProvider = new BaselineProvider(_duckDb);
+ _detector = new AnomalyDetector(_duckDb, _baselineProvider);
+ BaselineProvider.CacheTtl = TimeSpan.FromMilliseconds(1);
+ }
+
+ public void Dispose()
+ {
+ try
+ {
+ if (Directory.Exists(_tempDir))
+ Directory.Delete(_tempDir, recursive: true);
+ }
+ catch { }
+ }
+
+ private AnalysisContext CreateContext() => new()
+ {
+ ServerId = ServerId,
+ ServerName = ServerName,
+ TimeRangeStart = _analysisStart,
+ TimeRangeEnd = _analysisEnd
+ };
+
+ // ── Batch Requests ──
+
+ [Fact]
+ public async Task DetectBatchRequestAnomalies_Spike_DetectsAnomaly()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Baseline: normal batch requests (~5000)
+ await SeedBaselinePerfmon("Batch Requests/sec", 5000, variance: 200);
+
+ // Analysis window: spike to 15000
+ for (int i = 0; i < 16; i++)
+ await SeedPerfmonAsync(_analysisStart.AddMinutes(i * 15), "Batch Requests/sec", 15000);
+
+ // Need wait/cpu data for HasBaselineDataAsync
+ await SeedBaselineCpu(10, variance: 2);
+
+ var anomalies = await _detector.DetectAnomaliesAsync(CreateContext());
+
+ Assert.Contains(anomalies, f => f.Key == "ANOMALY_BATCH_REQUESTS");
+ var fact = anomalies.First(f => f.Key == "ANOMALY_BATCH_REQUESTS");
+ Assert.True(fact.Metadata["deviation_sigma"] >= 2.0);
+ Assert.True(fact.Metadata.ContainsKey("baseline_hour"));
+ Assert.True(fact.Metadata.ContainsKey("baseline_dow"));
+ Assert.True(fact.Metadata.ContainsKey("baseline_tier"));
+ }
+
+ [Fact]
+ public async Task DetectBatchRequestAnomalies_Normal_NoAnomaly()
+ {
+ await _duckDb.InitializeAsync();
+
+ await SeedBaselinePerfmon("Batch Requests/sec", 5000, variance: 200);
+
+ // Analysis window: same as baseline
+ for (int i = 0; i < 16; i++)
+ await SeedPerfmonAsync(_analysisStart.AddMinutes(i * 15), "Batch Requests/sec", 5000);
+
+ await SeedBaselineCpu(10, variance: 2);
+
+ var anomalies = await _detector.DetectAnomaliesAsync(CreateContext());
+
+ Assert.DoesNotContain(anomalies, f => f.Key == "ANOMALY_BATCH_REQUESTS");
+ }
+
+ // ── Session Count ──
+
+ [Fact]
+ public async Task DetectSessionAnomalies_Spike_DetectsAnomaly()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Baseline: ~20 connections
+ await SeedBaselineSessions(20, variance: 2);
+
+ // Analysis window: spike to 200 connections
+ for (int i = 0; i < 16; i++)
+ {
+ var t = _analysisStart.AddMinutes(i * 15);
+ await SeedSessionStatAsync(t, "App1", 150);
+ await SeedSessionStatAsync(t, "App2", 50);
+ }
+
+ await SeedBaselineCpu(10, variance: 2);
+ // CPU data in analysis window (needed for HasBaselineDataAsync and CPU detector to not exit early)
+ for (int i = 0; i < 4; i++)
+ await SeedCpuAsync(_analysisStart.AddMinutes(i * 15), 10);
+
+ var anomalies = await _detector.DetectAnomaliesAsync(CreateContext());
+
+ Assert.Contains(anomalies, f => f.Key == "ANOMALY_SESSION_SPIKE");
+ }
+
+ [Fact]
+ public async Task DetectSessionAnomalies_Normal_NoAnomaly()
+ {
+ await _duckDb.InitializeAsync();
+
+ await SeedBaselineSessions(20, variance: 2);
+
+ // Analysis window: same as baseline
+ for (int i = 0; i < 16; i++)
+ {
+ var t = _analysisStart.AddMinutes(i * 15);
+ await SeedSessionStatAsync(t, "App1", 15);
+ await SeedSessionStatAsync(t, "App2", 5);
+ }
+
+ await SeedBaselineCpu(10, variance: 2);
+
+ var anomalies = await _detector.DetectAnomaliesAsync(CreateContext());
+
+ Assert.DoesNotContain(anomalies, f => f.Key == "ANOMALY_SESSION_SPIKE");
+ }
+
+ // ── Query Duration ──
+
+ [Fact]
+ public async Task DetectQueryDurationAnomalies_Spike_DetectsAnomaly()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Baseline: ~10000 microseconds total elapsed per collection
+ await SeedBaselineQueryStats(10_000, variance: 1000);
+
+ // Analysis window: spike to 500000 microseconds
+ for (int i = 0; i < 16; i++)
+ await SeedQueryStatAsync(_analysisStart.AddMinutes(i * 15), 500_000, 100);
+
+ await SeedBaselineCpu(10, variance: 2);
+ await SeedBaselineWaits();
+
+ var anomalies = await _detector.DetectAnomaliesAsync(CreateContext());
+
+ Assert.Contains(anomalies, f => f.Key == "ANOMALY_QUERY_DURATION");
+ }
+
+ // ── Memory Pressure ──
+
+ [Fact]
+ public async Task DetectMemoryAnomalies_HighPressure_DetectsAnomaly()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Baseline: ~70% memory pressure
+ await SeedBaselineMemory(70_000, 100_000);
+
+ // Analysis window: spike to 99%
+ for (int i = 0; i < 16; i++)
+ await SeedMemoryStatAsync(_analysisStart.AddMinutes(i * 15), 99_000, 100_000);
+
+ await SeedBaselineCpu(10, variance: 2);
+
+ var anomalies = await _detector.DetectAnomaliesAsync(CreateContext());
+
+ Assert.Contains(anomalies, f => f.Key == "ANOMALY_MEMORY_PRESSURE");
+ }
+
+ [Fact]
+ public async Task DetectMemoryAnomalies_Normal_NoAnomaly()
+ {
+ await _duckDb.InitializeAsync();
+
+ await SeedBaselineMemory(70_000, 100_000);
+
+ // Analysis window: same as baseline
+ for (int i = 0; i < 16; i++)
+ await SeedMemoryStatAsync(_analysisStart.AddMinutes(i * 15), 70_000, 100_000);
+
+ await SeedBaselineCpu(10, variance: 2);
+
+ var anomalies = await _detector.DetectAnomaliesAsync(CreateContext());
+
+ Assert.DoesNotContain(anomalies, f => f.Key == "ANOMALY_MEMORY_PRESSURE");
+ }
+
+ // ── Per-metric threshold ──
+
+ [Fact]
+ public async Task SetDeviationThreshold_HigherThreshold_SuppressesAnomaly()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Baseline: CPU ~10%
+ await SeedBaselineCpu(10, variance: 2);
+
+ // Analysis window: CPU spike to 60% (would normally be >2σ)
+ for (int i = 0; i < 16; i++)
+ await SeedCpuAsync(_analysisStart.AddMinutes(i * 15), 60);
+
+ // Default threshold (2σ) should detect it
+ var anomalies1 = await _detector.DetectAnomaliesAsync(CreateContext());
+ var hasCpu1 = anomalies1.Any(f => f.Key == "ANOMALY_CPU_SPIKE");
+
+ // Set very high threshold — should suppress it
+ _detector.SetDeviationThreshold(MetricNames.Cpu, 100.0);
+ _baselineProvider.ClearCache();
+ var anomalies2 = await _detector.DetectAnomaliesAsync(CreateContext());
+ var hasCpu2 = anomalies2.Any(f => f.Key == "ANOMALY_CPU_SPIKE");
+
+ // Reset
+ _detector.SetDeviationThreshold(MetricNames.Cpu, 2.0);
+
+ Assert.False(hasCpu2, "High threshold should suppress CPU anomaly");
+ }
+
+ // ── Baseline context metadata ──
+
+ [Fact]
+ public async Task AnomalyFacts_ContainBaselineContextMetadata()
+ {
+ await _duckDb.InitializeAsync();
+
+ await SeedBaselineCpu(10, variance: 2);
+
+ // Spike to trigger anomaly
+ for (int i = 0; i < 16; i++)
+ await SeedCpuAsync(_analysisStart.AddMinutes(i * 15), 90);
+
+ var anomalies = await _detector.DetectAnomaliesAsync(CreateContext());
+ var cpuAnomaly = anomalies.FirstOrDefault(f => f.Key == "ANOMALY_CPU_SPIKE");
+
+ if (cpuAnomaly != null)
+ {
+ Assert.True(cpuAnomaly.Metadata.ContainsKey("baseline_hour"), "Missing baseline_hour");
+ Assert.True(cpuAnomaly.Metadata.ContainsKey("baseline_dow"), "Missing baseline_dow");
+ Assert.True(cpuAnomaly.Metadata.ContainsKey("baseline_tier"), "Missing baseline_tier");
+ Assert.True(cpuAnomaly.Metadata.ContainsKey("baseline_mean"), "Missing baseline_mean");
+ Assert.True(cpuAnomaly.Metadata.ContainsKey("deviation_sigma"), "Missing deviation_sigma");
+ }
+ }
+
+ // ── No baseline = no anomalies ──
+
+ [Fact]
+ public async Task DetectAnomalies_NoBaselineData_ReturnsEmpty()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Only analysis window data, no baseline
+ for (int i = 0; i < 16; i++)
+ await SeedCpuAsync(_analysisStart.AddMinutes(i * 15), 90);
+
+ var anomalies = await _detector.DetectAnomaliesAsync(CreateContext());
+
+ // Should not fire — no baseline to compare against
+ Assert.Empty(anomalies);
+ }
+
+ // ── Helpers: seed baseline data in the 30-day window before analysis ──
+
+ /// <summary>
+ /// Seeds baseline data across 14 days, keeping all samples within the same hour
+ /// as the analysis start so they land in the same time bucket. Uses 3-minute
+ /// intervals to stay within one hour (14 days × 4 samples = 56 total, enough
+ /// for flat/hour-only collapse).
+ /// </summary>
+ private async Task SeedBaselineCpu(int avgCpu, int variance)
+ {
+ var rng = new Random(42);
+ for (int day = 1; day <= 14; day++)
+ {
+ var baseDay = _analysisStart.AddDays(-day);
+ for (int i = 0; i < 4; i++)
+ {
+ var cpu = Math.Clamp(avgCpu + rng.Next(-variance, variance + 1), 0, 100);
+ await SeedCpuAsync(baseDay.AddMinutes(i * 3), cpu);
+ }
+ }
+ }
+
+ private async Task SeedBaselinePerfmon(string counterName, long avgValue, int variance)
+ {
+ var rng = new Random(42);
+ for (int day = 1; day <= 14; day++)
+ {
+ var baseDay = _analysisStart.AddDays(-day);
+ for (int i = 0; i < 4; i++)
+ {
+ var value = Math.Max(0, avgValue + rng.Next(-variance, variance + 1));
+ await SeedPerfmonAsync(baseDay.AddMinutes(i * 3), counterName, value);
+ }
+ }
+ }
+
+ private async Task SeedBaselineSessions(int avgConnections, int variance)
+ {
+ var rng = new Random(42);
+ for (int day = 1; day <= 14; day++)
+ {
+ var baseDay = _analysisStart.AddDays(-day);
+ for (int i = 0; i < 4; i++)
+ {
+ var count = Math.Max(1, avgConnections + rng.Next(-variance, variance + 1));
+ await SeedSessionStatAsync(baseDay.AddMinutes(i * 3), "App1", count);
+ }
+ }
+ }
+
+ private async Task SeedBaselineQueryStats(long avgElapsed, int variance)
+ {
+ var rng = new Random(42);
+ for (int day = 1; day <= 14; day++)
+ {
+ var baseDay = _analysisStart.AddDays(-day);
+ for (int i = 0; i < 4; i++)
+ {
+ var elapsed = Math.Max(0, avgElapsed + rng.Next(-variance, variance + 1));
+ await SeedQueryStatAsync(baseDay.AddMinutes(i * 3), elapsed, 100);
+ }
+ }
+ }
+
+ private async Task SeedBaselineWaits()
+ {
+ for (int day = 1; day <= 14; day++)
+ {
+ var baseDay = _analysisStart.AddDays(-day);
+ for (int i = 0; i < 4; i++)
+ await SeedWaitStatAsync(baseDay.AddMinutes(i * 3), "SOS_SCHEDULER_YIELD", 100);
+ }
+ }
+
+ private async Task SeedBaselineMemory(double avgTotalServerMb, double targetMb)
+ {
+ for (int day = 1; day <= 14; day++)
+ {
+ var baseDay = _analysisStart.AddDays(-day);
+ for (int i = 0; i < 4; i++)
+ await SeedMemoryStatAsync(baseDay.AddMinutes(i * 3), avgTotalServerMb, targetMb);
+ }
+ }
+
+ // ── Helpers: seed individual rows ──
+
+ private async Task SeedCpuAsync(DateTime time, int cpuValue)
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var conn = _duckDb.CreateConnection();
+ await conn.OpenAsync();
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = @"INSERT INTO cpu_utilization_stats
+ (collection_id, collection_time, server_id, server_name, sample_time,
+ sqlserver_cpu_utilization, other_process_cpu_utilization)
+ VALUES ($1, $2, $3, 'TestServer', $4, $5, 2)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = _nextId-- });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = cpuValue });
+ await cmd.ExecuteNonQueryAsync();
+ }
+
+ private async Task SeedPerfmonAsync(DateTime time, string counterName, long deltaValue)
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var conn = _duckDb.CreateConnection();
+ await conn.OpenAsync();
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = @"INSERT INTO perfmon_stats
+ (collection_id, collection_time, server_id, server_name,
+ object_name, counter_name, instance_name, cntr_value, delta_cntr_value, sample_interval_seconds)
+ VALUES ($1, $2, $3, 'TestServer', 'SQLServer:SQL Statistics', $4, '', $5, $5, 10)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = _nextId-- });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = counterName });
+ cmd.Parameters.Add(new DuckDBParameter { Value = deltaValue });
+ await cmd.ExecuteNonQueryAsync();
+ }
+
+ private async Task SeedWaitStatAsync(DateTime time, string waitType, long deltaWaitMs)
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var conn = _duckDb.CreateConnection();
+ await conn.OpenAsync();
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = @"INSERT INTO wait_stats
+ (collection_id, collection_time, server_id, server_name, wait_type,
+ waiting_tasks_count, wait_time_ms, signal_wait_time_ms,
+ delta_waiting_tasks, delta_wait_time_ms, delta_signal_wait_time_ms)
+ VALUES ($1, $2, $3, 'TestServer', $4, 0, 0, 0, 0, $5, 0)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = _nextId-- });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = waitType });
+ cmd.Parameters.Add(new DuckDBParameter { Value = deltaWaitMs });
+ await cmd.ExecuteNonQueryAsync();
+ }
+
+ private async Task SeedSessionStatAsync(DateTime time, string programName, long connectionCount)
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var conn = _duckDb.CreateConnection();
+ await conn.OpenAsync();
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = @"INSERT INTO session_stats
+ (collection_id, collection_time, server_id, server_name, program_name,
+ connection_count, running_count, sleeping_count, dormant_count)
+ VALUES ($1, $2, $3, 'TestServer', $4, $5, 0, 0, 0)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = _nextId-- });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = programName });
+ cmd.Parameters.Add(new DuckDBParameter { Value = connectionCount });
+ await cmd.ExecuteNonQueryAsync();
+ }
+
+ private async Task SeedQueryStatAsync(DateTime time, long deltaElapsed, long deltaExecCount)
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var conn = _duckDb.CreateConnection();
+ await conn.OpenAsync();
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = @"INSERT INTO query_stats
+ (collection_id, collection_time, server_id, server_name,
+ execution_count, total_elapsed_time, total_worker_time,
+ total_logical_reads, total_logical_writes, total_physical_reads,
+ delta_execution_count, delta_elapsed_time, delta_worker_time,
+ delta_logical_reads, delta_logical_writes, delta_physical_reads, delta_rows, delta_spills)
+ VALUES ($1, $2, $3, 'TestServer', $4, $5, 0, 0, 0, 0, $4, $5, 0, 0, 0, 0, 0, 0)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = _nextId-- });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = deltaExecCount });
+ cmd.Parameters.Add(new DuckDBParameter { Value = deltaElapsed });
+ await cmd.ExecuteNonQueryAsync();
+ }
+
+ private async Task SeedMemoryStatAsync(DateTime time, double totalServerMb, double targetMb)
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var conn = _duckDb.CreateConnection();
+ await conn.OpenAsync();
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = @"INSERT INTO memory_stats
+ (collection_id, collection_time, server_id, server_name,
+ total_physical_memory_mb, available_physical_memory_mb,
+ target_server_memory_mb, total_server_memory_mb, buffer_pool_mb)
+ VALUES ($1, $2, $3, 'TestServer', $4, $5, $6, $7, $7)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = _nextId-- });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = targetMb * 1.2 });
+ cmd.Parameters.Add(new DuckDBParameter { Value = targetMb * 0.2 });
+ cmd.Parameters.Add(new DuckDBParameter { Value = targetMb });
+ cmd.Parameters.Add(new DuckDBParameter { Value = totalServerMb });
+ await cmd.ExecuteNonQueryAsync();
+ }
+}
diff --git a/Lite.Tests/BaselineProviderTests.cs b/Lite.Tests/BaselineProviderTests.cs
new file mode 100644
index 0000000..cdf9cad
--- /dev/null
+++ b/Lite.Tests/BaselineProviderTests.cs
@@ -0,0 +1,481 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Threading.Tasks;
+using DuckDB.NET.Data;
+using PerformanceMonitorLite.Analysis;
+using PerformanceMonitorLite.Database;
+using Xunit;
+
+namespace PerformanceMonitorLite.Tests;
+
+/// <summary>
+/// Tests for BaselineProvider: time-bucketed baseline computation, bucket collapse
+/// with hysteresis, restart poisoning exclusion, and division-by-zero handling.
+/// </summary>
+public class BaselineProviderTests : IDisposable
+{
+ private readonly string _tempDir;
+ private readonly DuckDbInitializer _duckDb;
+ private readonly BaselineProvider _provider;
+
+ private const int ServerId = -999;
+
+ // Analysis time is pinned to a known hour+dow for deterministic bucket matching.
+ // Wednesday 14:00 UTC (dow=3 in DuckDB where Sunday=0)
+ private static readonly DateTime AnalysisTime = new(2026, 4, 1, 14, 0, 0, DateTimeKind.Utc);
+
+ private long _nextId = -1;
+
+ public BaselineProviderTests()
+ {
+ _tempDir = Path.Combine(Path.GetTempPath(), "BaselineTests_" + Guid.NewGuid().ToString("N")[..8]);
+ Directory.CreateDirectory(_tempDir);
+ var dbPath = Path.Combine(_tempDir, "test.duckdb");
+ _duckDb = new DuckDbInitializer(dbPath);
+ _provider = new BaselineProvider(_duckDb);
+ // Use very short TTL so cache doesn't interfere between tests
+ BaselineProvider.CacheTtl = TimeSpan.FromMilliseconds(1);
+ }
+
+ public void Dispose()
+ {
+ try
+ {
+ if (Directory.Exists(_tempDir))
+ Directory.Delete(_tempDir, recursive: true);
+ }
+ catch { /* Best-effort cleanup */ }
+ }
+
+ // ── Full bucket: enough samples in one hour+dow ──
+
+ [Fact]
+ public async Task GetBaseline_FullBucket_ReturnsMeanAndStdDev()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Seed 20 CPU samples on Wednesdays at 14:xx over 4 weeks (well above RestoreThreshold=15)
+ for (int week = 0; week < 4; week++)
+ {
+ var wednesday = AnalysisTime.AddDays(-7 * (week + 1)); // Previous Wednesdays
+ for (int i = 0; i < 5; i++)
+ {
+ await SeedCpuAsync(wednesday.AddMinutes(i * 10), 50 + i * 2); // 50,52,54,56,58
+ }
+ }
+
+ var baseline = await _provider.GetBaselineAsync(ServerId, MetricNames.Cpu, AnalysisTime);
+
+ Assert.True(baseline.SampleCount >= 15); // Full bucket
+ Assert.Equal(BaselineTier.Full, baseline.Tier);
+ Assert.InRange(baseline.Mean, 50, 58); // Mean of 50,52,54,56,58 repeated
+ Assert.True(baseline.StdDev > 0);
+ }
+
+ // ── Bucket collapse: hour-only fallback ──
+
+ [Fact]
+ public async Task GetBaseline_SparseBucket_CollapsesToHourOnly()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Seed only 5 samples on Wednesday 14:xx (below CollapseThreshold=10)
+ var wednesday = AnalysisTime.AddDays(-7);
+ for (int i = 0; i < 5; i++)
+ await SeedCpuAsync(wednesday.AddMinutes(i * 10), 40 + i);
+
+ // Seed 15 samples on other days at 14:xx (enough for hour-only)
+ for (int dow = 0; dow < 3; dow++) // Sat, Fri, Thu (the -7 - dow - 4 offset from a Wednesday), i.e. other days, same hour
+ {
+ var day = AnalysisTime.AddDays(-7 - dow - 4); // Different days, same hour
+ for (int i = 0; i < 5; i++)
+ await SeedCpuAsync(day.AddMinutes(i * 10), 60 + i);
+ }
+
+ _provider.ClearCache();
+ var baseline = await _provider.GetBaselineAsync(ServerId, MetricNames.Cpu, AnalysisTime);
+
+ Assert.True(baseline.SampleCount >= 10);
+ Assert.Equal(BaselineTier.HourOnly, baseline.Tier);
+ Assert.Equal(-1, baseline.DayOfWeek); // Indicates hour-only
+ }
+
+ // ── Bucket collapse: flat fallback ──
+
+ [Fact]
+ public async Task GetBaseline_VerySparseBucket_CollapsesToFlat()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Seed only 2 samples at 14:xx (below threshold for hour-only)
+ var day = AnalysisTime.AddDays(-7);
+ await SeedCpuAsync(day.AddMinutes(0), 30);
+ await SeedCpuAsync(day.AddMinutes(15), 35);
+
+ // Seed 5 samples at other hours (enough for flat but not hour-only)
+ for (int h = 0; h < 5; h++)
+ await SeedCpuAsync(day.AddHours(-h - 1), 50 + h);
+
+ _provider.ClearCache();
+ var baseline = await _provider.GetBaselineAsync(ServerId, MetricNames.Cpu, AnalysisTime);
+
+ // Should fall through to flat (7 samples total, >= 3 minimum viable)
+ Assert.True(baseline.SampleCount >= 3);
+ Assert.Equal(BaselineTier.Flat, baseline.Tier);
+ }
+
+ // ── Empty baseline ──
+
+ [Fact]
+ public async Task GetBaseline_NoData_ReturnsEmpty()
+ {
+ await _duckDb.InitializeAsync();
+
+ var baseline = await _provider.GetBaselineAsync(ServerId, MetricNames.Cpu, AnalysisTime);
+
+ Assert.Equal(0, baseline.SampleCount);
+ }
+
+ // ── Hysteresis: between collapse and restore thresholds ──
+
+ [Fact]
+ public async Task GetBaseline_BetweenThresholds_UsesFullBucket()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Seed exactly 12 samples on Wednesday 14:xx (between 10 and 15)
+ for (int week = 0; week < 3; week++)
+ {
+ var wednesday = AnalysisTime.AddDays(-7 * (week + 1));
+ for (int i = 0; i < 4; i++)
+ await SeedCpuAsync(wednesday.AddMinutes(i * 10), 45 + i);
+ }
+
+ _provider.ClearCache();
+ var baseline = await _provider.GetBaselineAsync(ServerId, MetricNames.Cpu, AnalysisTime);
+
+ // 12 samples >= CollapseThreshold(10), so full bucket is used (hysteresis)
+ Assert.Equal(12, baseline.SampleCount);
+ Assert.Equal(BaselineTier.Full, baseline.Tier);
+ }
+
+ // ── Division by zero: proportional floor ──
+
+ [Fact]
+ public void EffectiveStdDev_ZeroStdDev_UsesProportionalFloor()
+ {
+ // All identical values → stddev = 0, mean = 50
+ var bucket = new BaselineBucket
+ {
+ HourOfDay = 14, DayOfWeek = 3,
+ Mean = 50.0, StdDev = 0.0, SampleCount = 20,
+ Tier = BaselineTier.Full
+ };
+
+ // Proportional floor: max(StdDev, Mean * 0.01) = max(0.0, 50 * 0.01) = 0.5
+ Assert.Equal(0.5, bucket.EffectiveStdDev);
+ }
+
+ [Fact]
+ public void EffectiveStdDev_ZeroMeanAndZeroStdDev_ReturnsZero()
+ {
+ // Zero activity → skip scoring
+ var bucket = new BaselineBucket
+ {
+ HourOfDay = 14, DayOfWeek = 3,
+ Mean = 0.0, StdDev = 0.0, SampleCount = 20,
+ Tier = BaselineTier.Full
+ };
+
+ Assert.Equal(0.0, bucket.EffectiveStdDev);
+ }
+
+ [Fact]
+ public void EffectiveStdDev_NormalStdDev_ReturnsActual()
+ {
+ var bucket = new BaselineBucket
+ {
+ HourOfDay = 14, DayOfWeek = 3,
+ Mean = 50.0, StdDev = 5.0, SampleCount = 20,
+ Tier = BaselineTier.Full
+ };
+
+ // StdDev (5.0) > Mean * 0.01 (0.5), so return actual
+ Assert.Equal(5.0, bucket.EffectiveStdDev);
+ }
+
+ // ── Restart poisoning: cumulative counter drop excluded ──
+
+ [Fact]
+ public async Task GetBaseline_BatchRequests_ExcludesRestartDrop()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Seed batch requests with a restart-shaped drop in the middle
+ var baseDay = AnalysisTime.AddDays(-7);
+ var normalValues = new[] { 5000, 5100, 4900, 5200, 5050, 4950 };
+
+ for (int i = 0; i < normalValues.Length; i++)
+ await SeedPerfmonAsync(baseDay.AddMinutes(i * 10), "Batch Requests/sec", normalValues[i]);
+
+ // Restart drop: value falls to 0 then recovers
+ await SeedPerfmonAsync(baseDay.AddMinutes(60), "Batch Requests/sec", 0); // Restart
+ await SeedPerfmonAsync(baseDay.AddMinutes(70), "Batch Requests/sec", 5100); // Recovery
+
+ // Add enough more samples on other days to reach threshold
+ for (int d = 2; d <= 4; d++)
+ {
+ var day = AnalysisTime.AddDays(-7 * d);
+ for (int i = 0; i < 5; i++)
+ await SeedPerfmonAsync(day.AddMinutes(i * 10), "Batch Requests/sec", 5000 + i * 50);
+ }
+
+ _provider.ClearCache();
+ var baseline = await _provider.GetBaselineAsync(ServerId, MetricNames.BatchRequests, AnalysisTime);
+
+ // The restart drop (0) should be excluded, so mean should be near 5000, not pulled toward 0
+ Assert.True(baseline.Mean > 4000, $"Mean {baseline.Mean} should not be poisoned by restart drop");
+ }
+
+ // ── Wait stats: per-collection aggregation ──
+
+ [Fact]
+ public async Task GetBaseline_WaitStats_AggregatesPerCollection()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Seed multiple wait types at each collection time — baseline should aggregate to total
+ for (int week = 0; week < 4; week++)
+ {
+ var day = AnalysisTime.AddDays(-7 * (week + 1));
+ for (int i = 0; i < 5; i++)
+ {
+ var t = day.AddMinutes(i * 10);
+ await SeedWaitStatAsync(t, "SOS_SCHEDULER_YIELD", 100);
+ await SeedWaitStatAsync(t, "WRITELOG", 50);
+ await SeedWaitStatAsync(t, "PAGEIOLATCH_SH", 30);
+ }
+ }
+
+ _provider.ClearCache();
+ var baseline = await _provider.GetBaselineAsync(ServerId, MetricNames.WaitStats, AnalysisTime);
+
+ Assert.True(baseline.SampleCount > 0);
+ // Mean should be ~180 (100+50+30 per collection)
+ Assert.InRange(baseline.Mean, 150, 210);
+ }
+
+ // ── Session count: per-collection aggregation ──
+
+ [Fact]
+ public async Task GetBaseline_SessionCount_AggregatesPerCollection()
+ {
+ await _duckDb.InitializeAsync();
+
+ // Seed multiple program_name rows per collection
+ for (int week = 0; week < 4; week++)
+ {
+ var day = AnalysisTime.AddDays(-7 * (week + 1));
+ for (int i = 0; i < 5; i++)
+ {
+ var t = day.AddMinutes(i * 10);
+ await SeedSessionStatAsync(t, "App1", 10);
+ await SeedSessionStatAsync(t, "App2", 5);
+ }
+ }
+
+ _provider.ClearCache();
+ var baseline = await _provider.GetBaselineAsync(ServerId, MetricNames.SessionCount, AnalysisTime);
+
+ Assert.True(baseline.SampleCount > 0);
+ // Mean should be ~15 (10+5 per collection)
+ Assert.InRange(baseline.Mean, 12, 18);
+ }
+
+ // ── Cache behavior ──
+
+ [Fact]
+ public async Task GetBaseline_CacheHit_ReturnsSameResult()
+ {
+ await _duckDb.InitializeAsync();
+
+ for (int i = 0; i < 20; i++)
+ await SeedCpuAsync(AnalysisTime.AddDays(-7).AddMinutes(i * 10), 50);
+
+ BaselineProvider.CacheTtl = TimeSpan.FromMinutes(5);
+ _provider.ClearCache();
+
+ var first = await _provider.GetBaselineAsync(ServerId, MetricNames.Cpu, AnalysisTime);
+ var second = await _provider.GetBaselineAsync(ServerId, MetricNames.Cpu, AnalysisTime);
+
+ Assert.Equal(first.Mean, second.Mean);
+ Assert.Equal(first.SampleCount, second.SampleCount);
+
+ // Restore short TTL
+ BaselineProvider.CacheTtl = TimeSpan.FromMilliseconds(1);
+ }
+
+ [Fact]
+ public async Task InvalidateCache_ClearsServerEntries()
+ {
+ await _duckDb.InitializeAsync();
+
+ for (int i = 0; i < 20; i++)
+ await SeedCpuAsync(AnalysisTime.AddDays(-7).AddMinutes(i * 10), 50);
+
+ BaselineProvider.CacheTtl = TimeSpan.FromMinutes(5);
+ _provider.ClearCache();
+
+ await _provider.GetBaselineAsync(ServerId, MetricNames.Cpu, AnalysisTime);
+ _provider.InvalidateCache(ServerId);
+
+ // After invalidation, should recompute (no error, same result)
+ var after = await _provider.GetBaselineAsync(ServerId, MetricNames.Cpu, AnalysisTime);
+ Assert.True(after.SampleCount > 0);
+
+ BaselineProvider.CacheTtl = TimeSpan.FromMilliseconds(1);
+ }
+
+ // ── Server isolation: no cross-contamination ──
+
+ [Fact]
+ public async Task GetBaseline_DifferentServers_NoCrossContamination()
+ {
+ await _duckDb.InitializeAsync();
+
+ int server1 = -998, server2 = -997;
+
+ // Seed different CPU values for two servers
+ for (int i = 0; i < 20; i++)
+ {
+ await SeedCpuAsync(AnalysisTime.AddDays(-7).AddMinutes(i * 10), 80, server1);
+ await SeedCpuAsync(AnalysisTime.AddDays(-7).AddMinutes(i * 10), 20, server2);
+ }
+
+ _provider.ClearCache();
+ var baseline1 = await _provider.GetBaselineAsync(server1, MetricNames.Cpu, AnalysisTime);
+ var baseline2 = await _provider.GetBaselineAsync(server2, MetricNames.Cpu, AnalysisTime);
+
+ Assert.InRange(baseline1.Mean, 75, 85);
+ Assert.InRange(baseline2.Mean, 15, 25);
+ }
+
+ // ── Memory metric (Lite-only) ──
+
+ [Fact]
+ public async Task GetBaseline_Memory_ComputesPressurePercent()
+ {
+ await _duckDb.InitializeAsync();
+
+ // 80% memory pressure: 80GB used of 100GB target
+ for (int week = 0; week < 4; week++)
+ {
+ var day = AnalysisTime.AddDays(-7 * (week + 1));
+ for (int i = 0; i < 5; i++)
+ await SeedMemoryStatAsync(day.AddMinutes(i * 10), totalServerMb: 80_000, targetMb: 100_000);
+ }
+
+ _provider.ClearCache();
+ var baseline = await _provider.GetBaselineAsync(ServerId, MetricNames.Memory, AnalysisTime);
+
+ Assert.True(baseline.SampleCount > 0);
+ Assert.InRange(baseline.Mean, 78, 82); // ~80%
+ }
+
+ // ── Helpers ──
+
+ private async Task SeedCpuAsync(DateTime time, int cpuValue, int serverId = ServerId)
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var conn = _duckDb.CreateConnection();
+ await conn.OpenAsync();
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = @"INSERT INTO cpu_utilization_stats
+ (collection_id, collection_time, server_id, server_name, sample_time,
+ sqlserver_cpu_utilization, other_process_cpu_utilization)
+ VALUES ($1, $2, $3, 'TestServer', $4, $5, 2)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = _nextId-- });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = serverId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = cpuValue });
+ await cmd.ExecuteNonQueryAsync();
+ }
+
+ private async Task SeedPerfmonAsync(DateTime time, string counterName, long deltaValue)
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var conn = _duckDb.CreateConnection();
+ await conn.OpenAsync();
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = @"INSERT INTO perfmon_stats
+ (collection_id, collection_time, server_id, server_name,
+ object_name, counter_name, instance_name, cntr_value, delta_cntr_value, sample_interval_seconds)
+ VALUES ($1, $2, $3, 'TestServer', 'SQLServer:SQL Statistics', $4, '', $5, $5, 10)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = _nextId-- });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = counterName });
+ cmd.Parameters.Add(new DuckDBParameter { Value = deltaValue });
+ await cmd.ExecuteNonQueryAsync();
+ }
+
+ private async Task SeedWaitStatAsync(DateTime time, string waitType, long deltaWaitMs)
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var conn = _duckDb.CreateConnection();
+ await conn.OpenAsync();
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = @"INSERT INTO wait_stats
+ (collection_id, collection_time, server_id, server_name, wait_type,
+ waiting_tasks_count, wait_time_ms, signal_wait_time_ms,
+ delta_waiting_tasks, delta_wait_time_ms, delta_signal_wait_time_ms)
+ VALUES ($1, $2, $3, 'TestServer', $4, 0, 0, 0, 0, $5, 0)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = _nextId-- });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = waitType });
+ cmd.Parameters.Add(new DuckDBParameter { Value = deltaWaitMs });
+ await cmd.ExecuteNonQueryAsync();
+ }
+
+ private async Task SeedSessionStatAsync(DateTime time, string programName, long connectionCount)
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var conn = _duckDb.CreateConnection();
+ await conn.OpenAsync();
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = @"INSERT INTO session_stats
+ (collection_id, collection_time, server_id, server_name, program_name,
+ connection_count, running_count, sleeping_count, dormant_count)
+ VALUES ($1, $2, $3, 'TestServer', $4, $5, 0, 0, 0)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = _nextId-- });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = programName });
+ cmd.Parameters.Add(new DuckDBParameter { Value = connectionCount });
+ await cmd.ExecuteNonQueryAsync();
+ }
+
+ private async Task SeedMemoryStatAsync(DateTime time, double totalServerMb, double targetMb)
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var conn = _duckDb.CreateConnection();
+ await conn.OpenAsync();
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = @"INSERT INTO memory_stats
+ (collection_id, collection_time, server_id, server_name,
+ total_physical_memory_mb, available_physical_memory_mb,
+ target_server_memory_mb, total_server_memory_mb, buffer_pool_mb)
+ VALUES ($1, $2, $3, 'TestServer', $4, $5, $6, $7, $7)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = _nextId-- });
+ cmd.Parameters.Add(new DuckDBParameter { Value = time });
+ cmd.Parameters.Add(new DuckDBParameter { Value = ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = targetMb * 1.2 }); // total physical > target
+ cmd.Parameters.Add(new DuckDBParameter { Value = targetMb * 0.2 }); // some available
+ cmd.Parameters.Add(new DuckDBParameter { Value = targetMb });
+ cmd.Parameters.Add(new DuckDBParameter { Value = totalServerMb });
+ await cmd.ExecuteNonQueryAsync();
+ }
+}
diff --git a/Lite.Tests/ScenarioTests.cs b/Lite.Tests/ScenarioTests.cs
index dcadf1a..bc76481 100644
--- a/Lite.Tests/ScenarioTests.cs
+++ b/Lite.Tests/ScenarioTests.cs
@@ -445,7 +445,7 @@ public async Task WaitSpikeAnomaly_HighRatio()
var facts = await collector.CollectFactsAsync(context);
// Run anomaly detection (compares analysis window against baseline)
- var anomalyDetector = new AnomalyDetector(_duckDb);
+ var anomalyDetector = new AnomalyDetector(_duckDb, new BaselineProvider(_duckDb));
var anomalies = await anomalyDetector.DetectAnomaliesAsync(context);
facts.AddRange(anomalies);
diff --git a/Lite/Analysis/AnalysisModels.cs b/Lite/Analysis/AnalysisModels.cs
index 022bfd7..3fb6fd2 100644
--- a/Lite/Analysis/AnalysisModels.cs
+++ b/Lite/Analysis/AnalysisModels.cs
@@ -72,6 +72,7 @@ public class AnalysisStory
public double? LeafFactValue { get; set; }
public int FactCount { get; set; }
public bool IsAbsolution { get; set; }
+ public Dictionary<string, double>? RootFactMetadata { get; set; }
}
///
@@ -104,6 +105,12 @@ public class AnalysisFinding
/// Contains supporting detail keyed by category (e.g., "top_deadlocks", "queries_at_spike").
///
public Dictionary? DrillDown { get; set; }
+
+ ///
+ /// Root fact metadata from anomaly detection. Ephemeral — not persisted to DuckDB.
+ /// Contains baseline context (mean, stddev, tier, hour, dow) for anomaly findings.
+ ///
+ public Dictionary<string, double>? RootFactMetadata { get; set; }
}
///
diff --git a/Lite/Analysis/AnalysisService.cs b/Lite/Analysis/AnalysisService.cs
index 43fd508..83fa23d 100644
--- a/Lite/Analysis/AnalysisService.cs
+++ b/Lite/Analysis/AnalysisService.cs
@@ -23,13 +23,14 @@ public class AnalysisService
private readonly InferenceEngine _engine;
private readonly DrillDownCollector _drillDown;
private readonly AnomalyDetector _anomalyDetector;
+ private readonly BaselineProvider _baselineProvider;
///
/// Minimum hours of collected data required before analysis will run.
/// Short collection windows distort fraction-of-period calculations —
/// 5 seconds of THREADPOOL looks alarming in a 16-minute window.
/// Production: 72. Dev/testing: 0.5 (raise before release).
///
- internal double MinimumDataHours { get; set; } = 72;
+ internal double MinimumDataHours { get; set; } = 24; // TODO: restore to 72 before release
///
/// Raised after each analysis run completes, providing the findings for UI display.
@@ -60,7 +61,8 @@ public AnalysisService(DuckDbInitializer duckDb, IPlanFetcher? planFetcher = nul
_graph = new RelationshipGraph();
_engine = new InferenceEngine(_graph);
_drillDown = new DrillDownCollector(duckDb, planFetcher);
- _anomalyDetector = new AnomalyDetector(duckDb);
+ _baselineProvider = new BaselineProvider(duckDb);
+ _anomalyDetector = new AnomalyDetector(duckDb, _baselineProvider);
}
///
diff --git a/Lite/Analysis/AnomalyDetector.cs b/Lite/Analysis/AnomalyDetector.cs
index ab520fd..1398b0e 100644
--- a/Lite/Analysis/AnomalyDetector.cs
+++ b/Lite/Analysis/AnomalyDetector.cs
@@ -1,528 +1,780 @@
-using System;
-using System.Collections.Generic;
-using System.Linq;
-using System.Threading.Tasks;
-using DuckDB.NET.Data;
-using PerformanceMonitorLite.Database;
-using PerformanceMonitorLite.Services;
-
-namespace PerformanceMonitorLite.Analysis;
-
-///
-/// Detects anomalies by comparing the analysis window's metrics against a
-/// baseline period. When a metric deviates significantly from baseline
-/// (mean + standard deviation), an ANOMALY fact is emitted.
-///
-/// This is the "oh shit" mode — detecting acute deviations that don't show
-/// up in aggregate analysis because they're brief. A 5-minute CPU spike
-/// that averages out over 4 hours is invisible to aggregate scoring but
-/// obvious when compared against "what was this metric doing before?"
-///
-/// Baseline selection: uses the 24 hours preceding the analysis window.
-/// If less data is available, uses whatever exists with lower confidence.
-///
-public class AnomalyDetector
-{
- private readonly DuckDbInitializer _duckDb;
-
- ///
- /// Minimum number of baseline samples needed for reliable detection.
- /// Below this, anomalies are still detected but with reduced confidence.
- ///
- private const int MinBaselineSamples = 10;
-
- ///
- /// Number of standard deviations above baseline mean to flag as anomalous.
- ///
- private const double DeviationThreshold = 2.0;
-
- public AnomalyDetector(DuckDbInitializer duckDb)
- {
- _duckDb = duckDb;
- }
-
- ///
- /// Detects anomalies by comparing the analysis window against a baseline period.
- /// Returns anomaly facts to be merged into the main fact list.
- ///
- public async Task<List<Fact>> DetectAnomaliesAsync(AnalysisContext context)
- {
- var anomalies = new List<Fact>();
-
- // Baseline: 24 hours preceding the analysis window
- var baselineEnd = context.TimeRangeStart;
- var baselineStart = baselineEnd.AddHours(-24);
-
- // Check if baseline period has any data at all — if not, skip all anomaly detection.
- // Without baseline data, everything looks anomalous.
- if (!await HasBaselineDataAsync(context.ServerId, baselineStart, baselineEnd))
- return anomalies;
-
- await DetectCpuAnomalies(context, baselineStart, baselineEnd, anomalies);
- await DetectWaitAnomalies(context, baselineStart, baselineEnd, anomalies);
- await DetectBlockingAnomalies(context, baselineStart, baselineEnd, anomalies);
- await DetectIoAnomalies(context, baselineStart, baselineEnd, anomalies);
-
- return anomalies;
- }
-
- ///
- /// Checks if the baseline period has any collected data.
- /// Uses wait_stats as canary — if waits are collected, other data is too.
- ///
- private async Task<bool> HasBaselineDataAsync(int serverId, DateTime baselineStart, DateTime baselineEnd)
- {
- try
- {
- using var readLock = _duckDb.AcquireReadLock();
- using var connection = _duckDb.CreateConnection();
- await connection.OpenAsync();
-
- using var cmd = connection.CreateCommand();
- cmd.CommandText = @"
-SELECT (SELECT COUNT(*) FROM v_wait_stats
- WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3)
- + (SELECT COUNT(*) FROM v_cpu_utilization_stats
- WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3)";
- cmd.Parameters.Add(new DuckDBParameter { Value = serverId });
- cmd.Parameters.Add(new DuckDBParameter { Value = baselineStart });
- cmd.Parameters.Add(new DuckDBParameter { Value = baselineEnd });
-
- var count = Convert.ToInt64(await cmd.ExecuteScalarAsync() ?? 0);
- return count > 0;
- }
- catch { return false; }
- }
-
- ///
- /// Detects CPU utilization anomalies by comparing per-sample values
- /// against the baseline distribution.
- ///
- private async Task DetectCpuAnomalies(AnalysisContext context,
- DateTime baselineStart, DateTime baselineEnd, List<Fact> anomalies)
- {
- try
- {
- using var readLock = _duckDb.AcquireReadLock();
- using var connection = _duckDb.CreateConnection();
- await connection.OpenAsync();
-
- // Get baseline stats
- using var baselineCmd = connection.CreateCommand();
- baselineCmd.CommandText = @"
-SELECT AVG(sqlserver_cpu_utilization) AS mean_cpu,
- STDDEV_SAMP(sqlserver_cpu_utilization) AS stddev_cpu,
- COUNT(*) AS sample_count
-FROM v_cpu_utilization_stats
-WHERE server_id = $1
-AND collection_time >= $2 AND collection_time < $3";
-
- baselineCmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
- baselineCmd.Parameters.Add(new DuckDBParameter { Value = baselineStart });
- baselineCmd.Parameters.Add(new DuckDBParameter { Value = baselineEnd });
-
- double baselineMean = 0, baselineStdDev = 0;
- long baselineSamples = 0;
-
- using (var reader = await baselineCmd.ExecuteReaderAsync())
- {
- if (await reader.ReadAsync())
- {
- baselineMean = reader.IsDBNull(0) ? 0 : Convert.ToDouble(reader.GetValue(0));
- baselineStdDev = reader.IsDBNull(1) ? 0 : Convert.ToDouble(reader.GetValue(1));
- baselineSamples = reader.IsDBNull(2) ? 0 : Convert.ToInt64(reader.GetValue(2));
- }
- }
-
- if (baselineSamples < 3 || baselineStdDev <= 0) return;
-
- // Get peak and average in the analysis window
- using var windowCmd = connection.CreateCommand();
- windowCmd.CommandText = @"
-SELECT MAX(sqlserver_cpu_utilization) AS peak_cpu,
- AVG(sqlserver_cpu_utilization) AS avg_cpu,
- COUNT(*) AS sample_count,
- (SELECT collection_time FROM v_cpu_utilization_stats
- WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
- ORDER BY sqlserver_cpu_utilization DESC LIMIT 1) AS peak_time
-FROM v_cpu_utilization_stats
-WHERE server_id = $1
-AND collection_time >= $2 AND collection_time < $3";
-
- windowCmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
- windowCmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
- windowCmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
-
- using var windowReader = await windowCmd.ExecuteReaderAsync();
- if (!await windowReader.ReadAsync()) return;
-
- var peakCpu = windowReader.IsDBNull(0) ? 0.0 : Convert.ToDouble(windowReader.GetValue(0));
- var avgCpu = windowReader.IsDBNull(1) ? 0.0 : Convert.ToDouble(windowReader.GetValue(1));
- var windowSamples = windowReader.IsDBNull(2) ? 0L : Convert.ToInt64(windowReader.GetValue(2));
- var peakTime = windowReader.IsDBNull(3) ? (DateTime?)null : windowReader.GetDateTime(3);
-
- if (windowSamples == 0) return;
-
- // Check if peak deviates significantly from baseline
- var deviation = (peakCpu - baselineMean) / baselineStdDev;
- if (deviation < DeviationThreshold || peakCpu < 50) return; // Don't flag low absolute values
-
- var confidence = baselineSamples >= MinBaselineSamples ? 1.0 : (double)baselineSamples / MinBaselineSamples;
-
- anomalies.Add(new Fact
- {
- Source = "anomaly",
- Key = "ANOMALY_CPU_SPIKE",
- Value = peakCpu,
- ServerId = context.ServerId,
- Metadata = new Dictionary
- {
- ["peak_cpu"] = peakCpu,
- ["avg_cpu_in_window"] = avgCpu,
- ["baseline_mean"] = baselineMean,
- ["baseline_stddev"] = baselineStdDev,
- ["deviation_sigma"] = deviation,
- ["baseline_samples"] = baselineSamples,
- ["window_samples"] = windowSamples,
- ["confidence"] = confidence,
- ["peak_time_ticks"] = peakTime?.Ticks ?? 0
- }
- });
- }
- catch (Exception ex)
- {
- AppLogger.Error("AnomalyDetector", $"CPU anomaly detection failed: {ex.Message}");
- }
- }
-
- ///
- /// Detects wait stat anomalies — significant waits in the analysis window
- /// that were absent or much lower in the baseline.
- ///
- private async Task DetectWaitAnomalies(AnalysisContext context,
- DateTime baselineStart, DateTime baselineEnd, List<Fact> anomalies)
- {
- try
- {
- using var readLock = _duckDb.AcquireReadLock();
- using var connection = _duckDb.CreateConnection();
- await connection.OpenAsync();
-
- // Check if baseline has any wait data at all — if not, skip
- using var checkCmd = connection.CreateCommand();
- checkCmd.CommandText = @"
-SELECT COUNT(*) FROM v_wait_stats
-WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3";
- checkCmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
- checkCmd.Parameters.Add(new DuckDBParameter { Value = baselineStart });
- checkCmd.Parameters.Add(new DuckDBParameter { Value = baselineEnd });
- var baselineCount = Convert.ToInt64(await checkCmd.ExecuteScalarAsync() ?? 0);
- if (baselineCount == 0) return;
-
- // Get per-wait-type totals in both windows
- using var cmd = connection.CreateCommand();
- cmd.CommandText = @"
-WITH baseline AS (
- SELECT wait_type,
- SUM(delta_wait_time_ms)::BIGINT AS total_ms
- FROM v_wait_stats
- WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
- AND delta_wait_time_ms > 0
- GROUP BY wait_type
-),
-current_window AS (
- SELECT wait_type,
- SUM(delta_wait_time_ms)::BIGINT AS total_ms
- FROM v_wait_stats
- WHERE server_id = $1 AND collection_time >= $4 AND collection_time <= $5
- AND delta_wait_time_ms > 0
- GROUP BY wait_type
-)
-SELECT c.wait_type,
- c.total_ms AS current_ms,
- COALESCE(b.total_ms, 0) AS baseline_ms
-FROM current_window c
-LEFT JOIN baseline b ON c.wait_type = b.wait_type
-WHERE c.total_ms > 10000 -- At least 10 seconds of wait time
-ORDER BY c.total_ms DESC
-LIMIT 10";
-
- cmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
- cmd.Parameters.Add(new DuckDBParameter { Value = baselineStart });
- cmd.Parameters.Add(new DuckDBParameter { Value = baselineEnd });
- cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
- cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
-
- using var reader = await cmd.ExecuteReaderAsync();
- while (await reader.ReadAsync())
- {
- var waitType = reader.GetString(0);
- var currentMs = Convert.ToInt64(reader.GetValue(1));
- var baselineMs = Convert.ToInt64(reader.GetValue(2));
-
- // Normalize to per-hour rates before comparing (windows are different lengths)
- var baselineHours = (baselineEnd - baselineStart).TotalHours;
- var currentHours = (context.TimeRangeEnd - context.TimeRangeStart).TotalHours;
- if (baselineHours <= 0) baselineHours = 1;
- if (currentHours <= 0) currentHours = 1;
-
- double ratio;
- string anomalyType;
-
- if (baselineMs == 0)
- {
- ratio = currentMs > 60_000 ? 100.0 : 0; // Only flag if > 1 minute total
- anomalyType = "new";
- }
- else
- {
- var baselineRate = baselineMs / baselineHours;
- var currentRate = currentMs / currentHours;
- ratio = baselineRate > 0 ? currentRate / baselineRate : 100.0;
- anomalyType = "spike";
- }
-
- if (ratio < 5.0) continue; // Need at least 5x increase
-
- anomalies.Add(new Fact
- {
- Source = "anomaly",
- Key = $"ANOMALY_WAIT_{waitType}",
- Value = currentMs,
- ServerId = context.ServerId,
- Metadata = new Dictionary
- {
- ["current_ms"] = currentMs,
- ["baseline_ms"] = baselineMs,
- ["ratio"] = ratio,
- ["is_new"] = anomalyType == "new" ? 1 : 0
- }
- });
- }
- }
- catch (Exception ex)
- {
- AppLogger.Error("AnomalyDetector", $"Wait anomaly detection failed: {ex.Message}");
- }
- }
-
- ///
- /// Detects blocking/deadlock anomalies — events in the analysis window
- /// that are significantly above baseline rates.
- ///
- private async Task DetectBlockingAnomalies(AnalysisContext context,
- DateTime baselineStart, DateTime baselineEnd, List<Fact> anomalies)
- {
- try
- {
- using var readLock = _duckDb.AcquireReadLock();
- using var connection = _duckDb.CreateConnection();
- await connection.OpenAsync();
-
- // Check if baseline period has any data at all
- using var checkCmd = connection.CreateCommand();
- checkCmd.CommandText = @"
-SELECT (SELECT COUNT(*) FROM v_blocked_process_reports
- WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3)
- + (SELECT COUNT(*) FROM v_deadlocks
- WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3)
- + (SELECT COUNT(*) FROM v_wait_stats
- WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3)";
- checkCmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
- checkCmd.Parameters.Add(new DuckDBParameter { Value = baselineStart });
- checkCmd.Parameters.Add(new DuckDBParameter { Value = baselineEnd });
- var baselineDataCount = Convert.ToInt64(await checkCmd.ExecuteScalarAsync() ?? 0);
- if (baselineDataCount == 0) return; // No baseline data = can't detect anomaly
-
- using var cmd = connection.CreateCommand();
- cmd.CommandText = @"
-SELECT
- (SELECT COUNT(*) FROM v_blocked_process_reports
- WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3) AS baseline_blocking,
- (SELECT COUNT(*) FROM v_blocked_process_reports
- WHERE server_id = $1 AND collection_time >= $4 AND collection_time <= $5) AS current_blocking,
- (SELECT COUNT(*) FROM v_deadlocks
- WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3) AS baseline_deadlocks,
- (SELECT COUNT(*) FROM v_deadlocks
- WHERE server_id = $1 AND collection_time >= $4 AND collection_time <= $5) AS current_deadlocks";
-
- cmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
- cmd.Parameters.Add(new DuckDBParameter { Value = baselineStart });
- cmd.Parameters.Add(new DuckDBParameter { Value = baselineEnd });
- cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
- cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
-
- using var reader = await cmd.ExecuteReaderAsync();
- if (!await reader.ReadAsync()) return;
-
- var baselineBlocking = Convert.ToInt64(reader.GetValue(0));
- var currentBlocking = Convert.ToInt64(reader.GetValue(1));
- var baselineDeadlocks = Convert.ToInt64(reader.GetValue(2));
- var currentDeadlocks = Convert.ToInt64(reader.GetValue(3));
-
- // Normalize to per-hour rates (windows are different lengths)
- var baselineHours = (baselineEnd - baselineStart).TotalHours;
- var currentHours = (context.TimeRangeEnd - context.TimeRangeStart).TotalHours;
- if (baselineHours <= 0) baselineHours = 1;
- if (currentHours <= 0) currentHours = 1;
-
- var baselineBlockingRate = baselineBlocking / baselineHours;
- var currentBlockingRate = currentBlocking / currentHours;
- var blockingRatio = baselineBlocking > 0 ? currentBlockingRate / baselineBlockingRate : 100.0;
-
- var baselineDeadlockRate = baselineDeadlocks / baselineHours;
- var currentDeadlockRate = currentDeadlocks / currentHours;
- var deadlockRatio = baselineDeadlocks > 0 ? currentDeadlockRate / baselineDeadlockRate : 100.0;
-
- // Blocking spike: at least 5 events AND 3x baseline rate (or new)
- if (currentBlocking >= 5 && (baselineBlocking == 0 || blockingRatio >= 3))
- {
- anomalies.Add(new Fact
- {
- Source = "anomaly",
- Key = "ANOMALY_BLOCKING_SPIKE",
- Value = currentBlocking,
- ServerId = context.ServerId,
- Metadata = new Dictionary
- {
- ["current_count"] = currentBlocking,
- ["baseline_count"] = baselineBlocking,
- ["ratio"] = blockingRatio
- }
- });
- }
-
- // Deadlock spike: at least 3 events AND 3x baseline rate (or new)
- if (currentDeadlocks >= 3 && (baselineDeadlocks == 0 || deadlockRatio >= 3))
- {
- anomalies.Add(new Fact
- {
- Source = "anomaly",
- Key = "ANOMALY_DEADLOCK_SPIKE",
- Value = currentDeadlocks,
- ServerId = context.ServerId,
- Metadata = new Dictionary
- {
- ["current_count"] = currentDeadlocks,
- ["baseline_count"] = baselineDeadlocks,
- ["ratio"] = deadlockRatio
- }
- });
- }
- }
- catch (Exception ex)
- {
- AppLogger.Error("AnomalyDetector", $"Blocking anomaly detection failed: {ex.Message}");
- }
- }
-
- ///
- /// Detects I/O latency anomalies — significant increase in read/write latency
- /// compared to baseline.
- ///
- private async Task DetectIoAnomalies(AnalysisContext context,
- DateTime baselineStart, DateTime baselineEnd, List<Fact> anomalies)
- {
- try
- {
- using var readLock = _duckDb.AcquireReadLock();
- using var connection = _duckDb.CreateConnection();
- await connection.OpenAsync();
-
- using var cmd = connection.CreateCommand();
- cmd.CommandText = @"
-WITH baseline AS (
- SELECT AVG(delta_stall_read_ms * 1.0 / NULLIF(delta_reads, 0)) AS avg_read_lat,
- AVG(delta_stall_write_ms * 1.0 / NULLIF(delta_writes, 0)) AS avg_write_lat,
- STDDEV_SAMP(delta_stall_read_ms * 1.0 / NULLIF(delta_reads, 0)) AS stddev_read,
- STDDEV_SAMP(delta_stall_write_ms * 1.0 / NULLIF(delta_writes, 0)) AS stddev_write,
- COUNT(*) AS samples
- FROM v_file_io_stats
- WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
- AND (delta_reads > 0 OR delta_writes > 0)
-),
-current_window AS (
- SELECT AVG(delta_stall_read_ms * 1.0 / NULLIF(delta_reads, 0)) AS avg_read_lat,
- AVG(delta_stall_write_ms * 1.0 / NULLIF(delta_writes, 0)) AS avg_write_lat
- FROM v_file_io_stats
- WHERE server_id = $1 AND collection_time >= $4 AND collection_time <= $5
- AND (delta_reads > 0 OR delta_writes > 0)
-)
-SELECT b.avg_read_lat, b.stddev_read, c.avg_read_lat,
- b.avg_write_lat, b.stddev_write, c.avg_write_lat,
- b.samples
-FROM baseline b, current_window c";
-
- cmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
- cmd.Parameters.Add(new DuckDBParameter { Value = baselineStart });
- cmd.Parameters.Add(new DuckDBParameter { Value = baselineEnd });
- cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
- cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
-
- using var reader = await cmd.ExecuteReaderAsync();
- if (!await reader.ReadAsync()) return;
-
- var baselineReadLat = reader.IsDBNull(0) ? 0.0 : Convert.ToDouble(reader.GetValue(0));
- var stddevRead = reader.IsDBNull(1) ? 0.0 : Convert.ToDouble(reader.GetValue(1));
- var currentReadLat = reader.IsDBNull(2) ? 0.0 : Convert.ToDouble(reader.GetValue(2));
- var baselineWriteLat = reader.IsDBNull(3) ? 0.0 : Convert.ToDouble(reader.GetValue(3));
- var stddevWrite = reader.IsDBNull(4) ? 0.0 : Convert.ToDouble(reader.GetValue(4));
- var currentWriteLat = reader.IsDBNull(5) ? 0.0 : Convert.ToDouble(reader.GetValue(5));
- var samples = reader.IsDBNull(6) ? 0L : Convert.ToInt64(reader.GetValue(6));
-
- if (samples < 3) return;
-
- // Read latency anomaly
- if (stddevRead > 0 && currentReadLat > 10) // At least 10ms to matter
- {
- var readDeviation = (currentReadLat - baselineReadLat) / stddevRead;
- if (readDeviation >= DeviationThreshold)
- {
- anomalies.Add(new Fact
- {
- Source = "anomaly",
- Key = "ANOMALY_READ_LATENCY",
- Value = currentReadLat,
- ServerId = context.ServerId,
- Metadata = new Dictionary
- {
- ["current_latency_ms"] = currentReadLat,
- ["baseline_mean_ms"] = baselineReadLat,
- ["baseline_stddev_ms"] = stddevRead,
- ["deviation_sigma"] = readDeviation,
- ["baseline_samples"] = samples
- }
- });
- }
- }
-
- // Write latency anomaly
- if (stddevWrite > 0 && currentWriteLat > 5) // At least 5ms to matter
- {
- var writeDeviation = (currentWriteLat - baselineWriteLat) / stddevWrite;
- if (writeDeviation >= DeviationThreshold)
- {
- anomalies.Add(new Fact
- {
- Source = "anomaly",
- Key = "ANOMALY_WRITE_LATENCY",
- Value = currentWriteLat,
- ServerId = context.ServerId,
- Metadata = new Dictionary
- {
- ["current_latency_ms"] = currentWriteLat,
- ["baseline_mean_ms"] = baselineWriteLat,
- ["baseline_stddev_ms"] = stddevWrite,
- ["deviation_sigma"] = writeDeviation,
- ["baseline_samples"] = samples
- }
- });
- }
- }
- }
- catch (Exception ex)
- {
- AppLogger.Error("AnomalyDetector", $"I/O anomaly detection failed: {ex.Message}");
- }
- }
-}
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading.Tasks;
+using DuckDB.NET.Data;
+using PerformanceMonitorLite.Database;
+using PerformanceMonitorLite.Services;
+
+namespace PerformanceMonitorLite.Analysis;
+
+///
+/// Detects anomalies by comparing the analysis window's metrics against
+/// time-bucketed baselines (hour-of-day x day-of-week, 30-day rolling window).
+///
+/// Two detection patterns:
+/// - Z-score: (observed - mean) / stddev — used for continuous metrics
+/// (CPU, batch requests, I/O latency, session counts, query duration, memory)
+/// - Ratio: currentRate / baselineRate — used for rate/event metrics
+/// (wait stats, blocking, deadlocks)
+///
+/// Baseline computation and caching are handled by BaselineProvider.
+///
+public class AnomalyDetector
+{
+ private readonly DuckDbInitializer _duckDb;
+ private readonly BaselineProvider _baselineProvider;
+
+ ///
+ /// Default number of standard deviations above baseline mean to flag as anomalous.
+ ///
+ private const double DefaultDeviationThreshold = 2.0;
+
+ ///
+ /// Default ratio threshold for rate-based anomaly detection (wait stats).
+ ///
+ private const double DefaultRatioThreshold = 5.0;
+
+ ///
+ /// Default ratio threshold for event-based anomaly detection (blocking/deadlocks).
+ ///
+ private const double DefaultEventRatioThreshold = 3.0;
+
+ ///
+ /// Per-metric deviation thresholds. Metrics not listed use DefaultDeviationThreshold.
+ ///
+ private readonly Dictionary<string, double> _deviationThresholds = new();
+
+ public AnomalyDetector(DuckDbInitializer duckDb, BaselineProvider baselineProvider)
+ {
+ _duckDb = duckDb;
+ _baselineProvider = baselineProvider;
+ }
+
+ ///
+ /// Sets a custom deviation threshold for a specific metric.
+ ///
+ public void SetDeviationThreshold(string metricName, double threshold)
+ {
+ _deviationThresholds[metricName] = threshold;
+ }
+
+ private double GetDeviationThreshold(string metricName)
+ {
+ return _deviationThresholds.TryGetValue(metricName, out var threshold)
+ ? threshold
+ : DefaultDeviationThreshold;
+ }
+
+ ///
+ /// Adds baseline context metadata to an anomaly fact's metadata dictionary.
+ ///
+ private static void AddBaselineContext(Dictionary<string, double> metadata, BaselineBucket baseline)
+ {
+ metadata["baseline_hour"] = baseline.HourOfDay;
+ metadata["baseline_dow"] = baseline.DayOfWeek;
+ metadata["baseline_tier"] = (double)baseline.Tier;
+ }
+
+ ///
+ /// Detects anomalies by comparing the analysis window against time-bucketed baselines.
+ /// Returns anomaly facts to be merged into the main fact list.
+ ///
+ public async Task<List<Fact>> DetectAnomaliesAsync(AnalysisContext context)
+ {
+ var anomalies = new List<Fact>();
+
+ // Check if baseline period has any data at all — if not, skip all anomaly detection.
+ if (!await HasBaselineDataAsync(context.ServerId))
+ return anomalies;
+
+ // Existing detection methods (upgraded to time-bucketed baselines)
+ await DetectCpuAnomalies(context, anomalies);
+ await DetectWaitAnomalies(context, anomalies);
+ await DetectBlockingAnomalies(context, anomalies);
+ await DetectIoAnomalies(context, anomalies);
+
+ // New detection methods
+ await DetectBatchRequestAnomalies(context, anomalies);
+ await DetectSessionAnomalies(context, anomalies);
+ await DetectQueryDurationAnomalies(context, anomalies);
+ await DetectMemoryAnomalies(context, anomalies);
+
+ return anomalies;
+ }
+
+ ///
+ /// Checks if the server has enough historical data for meaningful baselines.
+ /// Uses wait_stats as canary — if waits are collected, other data is too.
+ ///
+ private async Task<bool> HasBaselineDataAsync(int serverId)
+ {
+ try
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var connection = _duckDb.CreateConnection();
+ await connection.OpenAsync();
+
+ using var cmd = connection.CreateCommand();
+ cmd.CommandText = @"
+SELECT (SELECT COUNT(*) FROM v_wait_stats
+ WHERE server_id = $1 AND collection_time >= $2)
+ + (SELECT COUNT(*) FROM v_cpu_utilization_stats
+ WHERE server_id = $1 AND collection_time >= $2)";
+ cmd.Parameters.Add(new DuckDBParameter { Value = serverId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = DateTime.UtcNow.AddDays(-30) });
+
+ var count = Convert.ToInt64(await cmd.ExecuteScalarAsync() ?? 0);
+ return count > 0;
+ }
+ catch { return false; }
+ }
+
+ ///
+ /// Detects CPU utilization anomalies using z-score against time-bucketed baseline.
+ ///
+ private async Task DetectCpuAnomalies(AnalysisContext context, List<Fact> anomalies)
+ {
+ try
+ {
+ var baseline = await _baselineProvider.GetBaselineAsync(
+ context.ServerId, MetricNames.Cpu, context.TimeRangeStart);
+
+ if (baseline.SampleCount == 0) return;
+ var effectiveStdDev = baseline.EffectiveStdDev;
+ if (effectiveStdDev <= 0) return; // Zero mean + zero stddev — skip
+
+ using var readLock = _duckDb.AcquireReadLock();
+ using var connection = _duckDb.CreateConnection();
+ await connection.OpenAsync();
+
+ using var cmd = connection.CreateCommand();
+ cmd.CommandText = @"
+SELECT MAX(sqlserver_cpu_utilization) AS peak_cpu,
+ AVG(sqlserver_cpu_utilization) AS avg_cpu,
+ COUNT(*) AS sample_count,
+ (SELECT collection_time FROM v_cpu_utilization_stats
+ WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+ ORDER BY sqlserver_cpu_utilization DESC LIMIT 1) AS peak_time
+FROM v_cpu_utilization_stats
+WHERE server_id = $1
+AND collection_time >= $2 AND collection_time < $3";
+
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
+
+ using var reader = await cmd.ExecuteReaderAsync();
+ if (!await reader.ReadAsync()) return;
+
+ var peakCpu = reader.IsDBNull(0) ? 0.0 : Convert.ToDouble(reader.GetValue(0));
+ var avgCpu = reader.IsDBNull(1) ? 0.0 : Convert.ToDouble(reader.GetValue(1));
+ var windowSamples = reader.IsDBNull(2) ? 0L : Convert.ToInt64(reader.GetValue(2));
+ var peakTime = reader.IsDBNull(3) ? (DateTime?)null : reader.GetDateTime(3);
+
+ if (windowSamples == 0) return;
+
+ var deviation = (peakCpu - baseline.Mean) / effectiveStdDev;
+ if (deviation < GetDeviationThreshold(MetricNames.Cpu) || peakCpu < 50) return;
+
+ var metadata = new Dictionary<string, double>
+ {
+ ["peak_cpu"] = peakCpu,
+ ["avg_cpu_in_window"] = avgCpu,
+ ["baseline_mean"] = baseline.Mean,
+ ["baseline_stddev"] = effectiveStdDev,
+ ["deviation_sigma"] = deviation,
+ ["baseline_samples"] = baseline.SampleCount,
+ ["window_samples"] = windowSamples,
+ ["confidence"] = 1.0,
+ ["peak_time_ticks"] = peakTime?.Ticks ?? 0
+ };
+ AddBaselineContext(metadata, baseline);
+
+ anomalies.Add(new Fact
+ {
+ Source = "anomaly",
+ Key = "ANOMALY_CPU_SPIKE",
+ Value = peakCpu,
+ ServerId = context.ServerId,
+ Metadata = metadata
+ });
+ }
+ catch (Exception ex)
+ {
+ AppLogger.Error("AnomalyDetector", $"CPU anomaly detection failed: {ex.Message}");
+ }
+ }
+
+ ///
+ /// Detects wait stat anomalies — total wait time significantly above
+ /// baseline rate for this time bucket. Uses ratio-based scoring.
+ ///
+ private async Task DetectWaitAnomalies(AnalysisContext context, List<Fact> anomalies)
+ {
+ try
+ {
+ var baseline = await _baselineProvider.GetBaselineAsync(
+ context.ServerId, MetricNames.WaitStats, context.TimeRangeStart);
+
+ // No baseline data at all — can't distinguish "new" waits from "always present."
+ // Skip rather than flagging everything as anomalous.
+ if (baseline.SampleCount == 0) return;
+
+ using var readLock = _duckDb.AcquireReadLock();
+ using var connection = _duckDb.CreateConnection();
+ await connection.OpenAsync();
+
+ // Get per-wait-type totals in the analysis window
+ using var cmd = connection.CreateCommand();
+ cmd.CommandText = @"
+SELECT wait_type,
+ SUM(delta_wait_time_ms)::BIGINT AS total_ms
+FROM v_wait_stats
+WHERE server_id = $1 AND collection_time >= $2 AND collection_time <= $3
+AND delta_wait_time_ms > 0
+GROUP BY wait_type
+HAVING SUM(delta_wait_time_ms) > 10000
+ORDER BY total_ms DESC
+LIMIT 10";
+
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
+
+ var currentHours = (context.TimeRangeEnd - context.TimeRangeStart).TotalHours;
+ if (currentHours <= 0) currentHours = 1;
+
+ // Baseline mean is total wait ms per collection interval for this time bucket.
+ // If the bucket's mean is zero (no recorded waits at this hour/dow), treat waits over 1 minute as "new" (ratio=100).
+ var baselineRate = baseline.SampleCount > 0 ? baseline.Mean : 0;
+
+ using var reader = await cmd.ExecuteReaderAsync();
+ while (await reader.ReadAsync())
+ {
+ var waitType = reader.GetString(0);
+ var currentMs = Convert.ToInt64(reader.GetValue(1));
+ var currentRate = currentMs / currentHours;
+
+ double ratio;
+ string anomalyType;
+
+ if (baselineRate <= 0 || baseline.SampleCount == 0)
+ {
+ ratio = currentMs > 60_000 ? 100.0 : 0;
+ anomalyType = "new";
+ }
+ else
+ {
+ ratio = currentRate / baselineRate;
+ anomalyType = "spike";
+ }
+
+ if (ratio < DefaultRatioThreshold) continue;
+
+ var metadata = new Dictionary<string, double>
+ {
+ ["current_ms"] = currentMs,
+ ["baseline_mean"] = baseline.Mean,
+ ["ratio"] = ratio,
+ ["is_new"] = anomalyType == "new" ? 1 : 0
+ };
+ AddBaselineContext(metadata, baseline);
+
+ anomalies.Add(new Fact
+ {
+ Source = "anomaly",
+ Key = $"ANOMALY_WAIT_{waitType}",
+ Value = currentMs,
+ ServerId = context.ServerId,
+ Metadata = metadata
+ });
+ }
+ }
+ catch (Exception ex)
+ {
+ AppLogger.Error("AnomalyDetector", $"Wait anomaly detection failed: {ex.Message}");
+ }
+ }
+
+ ///
+ /// Detects blocking/deadlock anomalies — event rates significantly above
+ /// baseline for this time bucket. Uses ratio-based scoring.
+ ///
+ private async Task DetectBlockingAnomalies(AnalysisContext context, List<Fact> anomalies)
+ {
+ try
+ {
+ var blockingBaseline = await _baselineProvider.GetBaselineAsync(
+ context.ServerId, MetricNames.Blocking, context.TimeRangeStart);
+ var deadlockBaseline = await _baselineProvider.GetBaselineAsync(
+ context.ServerId, MetricNames.Deadlock, context.TimeRangeStart);
+
+ using var readLock = _duckDb.AcquireReadLock();
+ using var connection = _duckDb.CreateConnection();
+ await connection.OpenAsync();
+
+ using var cmd = connection.CreateCommand();
+ cmd.CommandText = @"
+SELECT
+ (SELECT COUNT(*) FROM v_blocked_process_reports
+ WHERE server_id = $1 AND collection_time >= $2 AND collection_time <= $3) AS current_blocking,
+ (SELECT COUNT(*) FROM v_deadlocks
+ WHERE server_id = $1 AND collection_time >= $2 AND collection_time <= $3) AS current_deadlocks";
+
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
+
+ using var reader = await cmd.ExecuteReaderAsync();
+ if (!await reader.ReadAsync()) return;
+
+ var currentBlocking = Convert.ToInt64(reader.GetValue(0));
+ var currentDeadlocks = Convert.ToInt64(reader.GetValue(1));
+
+ // Baseline mean = events per day for this hour+dow bucket. NOTE(review): the
+ // current-window counts cover only [TimeRangeStart, TimeRangeEnd], so the ratio
+ // compares a possibly sub-day window count against a per-day rate — verify intent.
+ var baselineBlockingRate = blockingBaseline.SampleCount > 0 ? blockingBaseline.Mean : 0;
+ var baselineDeadlockRate = deadlockBaseline.SampleCount > 0 ? deadlockBaseline.Mean : 0;
+
+ // Blocking spike: at least 5 events AND 3x baseline rate (or no baseline)
+ if (currentBlocking >= 5 && (baselineBlockingRate <= 0 || currentBlocking / Math.Max(baselineBlockingRate, 1) >= DefaultEventRatioThreshold))
+ {
+ var metadata = new Dictionary
+ {
+ ["current_count"] = currentBlocking,
+ ["baseline_rate"] = baselineBlockingRate,
+ ["ratio"] = baselineBlockingRate > 0 ? currentBlocking / baselineBlockingRate : 100.0
+ };
+ AddBaselineContext(metadata, blockingBaseline);
+
+ anomalies.Add(new Fact
+ {
+ Source = "anomaly",
+ Key = "ANOMALY_BLOCKING_SPIKE",
+ Value = currentBlocking,
+ ServerId = context.ServerId,
+ Metadata = metadata
+ });
+ }
+
+ // Deadlock spike: at least 3 events AND 3x baseline rate (or no baseline)
+ if (currentDeadlocks >= 3 && (baselineDeadlockRate <= 0 || currentDeadlocks / Math.Max(baselineDeadlockRate, 1) >= DefaultEventRatioThreshold))
+ {
+ var metadata = new Dictionary
+ {
+ ["current_count"] = currentDeadlocks,
+ ["baseline_rate"] = baselineDeadlockRate,
+ ["ratio"] = baselineDeadlockRate > 0 ? currentDeadlocks / baselineDeadlockRate : 100.0
+ };
+ AddBaselineContext(metadata, deadlockBaseline);
+
+ anomalies.Add(new Fact
+ {
+ Source = "anomaly",
+ Key = "ANOMALY_DEADLOCK_SPIKE",
+ Value = currentDeadlocks,
+ ServerId = context.ServerId,
+ Metadata = metadata
+ });
+ }
+ }
+ catch (Exception ex)
+ {
+ AppLogger.Error("AnomalyDetector", $"Blocking anomaly detection failed: {ex.Message}");
+ }
+ }
+
+ ///
+ /// Detects I/O latency anomalies using z-score against a time-bucketed baseline.
+ /// NOTE(review): the baseline query measures read latency only, yet write latency
+ /// is scored against the same baseline — confirm this is intended.
+ ///
+ private async Task DetectIoAnomalies(AnalysisContext context, List anomalies)
+ {
+ try
+ {
+ var baseline = await _baselineProvider.GetBaselineAsync(
+ context.ServerId, MetricNames.IoLatency, context.TimeRangeStart);
+
+ if (baseline.SampleCount == 0) return;
+ var effectiveStdDev = baseline.EffectiveStdDev;
+ if (effectiveStdDev <= 0) return;
+
+ using var readLock = _duckDb.AcquireReadLock();
+ using var connection = _duckDb.CreateConnection();
+ await connection.OpenAsync();
+
+ using var cmd = connection.CreateCommand();
+ cmd.CommandText = @"
+SELECT AVG(delta_stall_read_ms * 1.0 / NULLIF(delta_reads, 0)) AS avg_read_lat,
+ AVG(delta_stall_write_ms * 1.0 / NULLIF(delta_writes, 0)) AS avg_write_lat
+FROM v_file_io_stats
+WHERE server_id = $1 AND collection_time >= $2 AND collection_time <= $3
+AND (delta_reads > 0 OR delta_writes > 0)";
+
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
+
+ using var reader = await cmd.ExecuteReaderAsync();
+ if (!await reader.ReadAsync()) return;
+
+ var currentReadLat = reader.IsDBNull(0) ? 0.0 : Convert.ToDouble(reader.GetValue(0));
+ var currentWriteLat = reader.IsDBNull(1) ? 0.0 : Convert.ToDouble(reader.GetValue(1));
+
+ var ioThreshold = GetDeviationThreshold(MetricNames.IoLatency);
+
+ // Read latency anomaly
+ if (currentReadLat > 10)
+ {
+ var readDeviation = (currentReadLat - baseline.Mean) / effectiveStdDev;
+ if (readDeviation >= ioThreshold)
+ {
+ var metadata = new Dictionary
+ {
+ ["current_latency_ms"] = currentReadLat,
+ ["baseline_mean_ms"] = baseline.Mean,
+ ["baseline_stddev_ms"] = effectiveStdDev,
+ ["deviation_sigma"] = readDeviation,
+ ["baseline_samples"] = baseline.SampleCount
+ };
+ AddBaselineContext(metadata, baseline);
+
+ anomalies.Add(new Fact
+ {
+ Source = "anomaly",
+ Key = "ANOMALY_READ_LATENCY",
+ Value = currentReadLat,
+ ServerId = context.ServerId,
+ Metadata = metadata
+ });
+ }
+ }
+
+ // Write latency anomaly
+ if (currentWriteLat > 5)
+ {
+ var writeDeviation = (currentWriteLat - baseline.Mean) / effectiveStdDev;
+ if (writeDeviation >= ioThreshold)
+ {
+ var metadata = new Dictionary
+ {
+ ["current_latency_ms"] = currentWriteLat,
+ ["baseline_mean_ms"] = baseline.Mean,
+ ["baseline_stddev_ms"] = effectiveStdDev,
+ ["deviation_sigma"] = writeDeviation,
+ ["baseline_samples"] = baseline.SampleCount
+ };
+ AddBaselineContext(metadata, baseline);
+
+ anomalies.Add(new Fact
+ {
+ Source = "anomaly",
+ Key = "ANOMALY_WRITE_LATENCY",
+ Value = currentWriteLat,
+ ServerId = context.ServerId,
+ Metadata = metadata
+ });
+ }
+ }
+ }
+ catch (Exception ex)
+ {
+ AppLogger.Error("AnomalyDetector", $"I/O anomaly detection failed: {ex.Message}");
+ }
+ }
+
+ ///
+ /// Detects batch requests/sec anomalies using z-score against time-bucketed baseline.
+ ///
+ private async Task DetectBatchRequestAnomalies(AnalysisContext context, List anomalies)
+ {
+ try
+ {
+ var baseline = await _baselineProvider.GetBaselineAsync(
+ context.ServerId, MetricNames.BatchRequests, context.TimeRangeStart);
+
+ if (baseline.SampleCount == 0) return;
+ var effectiveStdDev = baseline.EffectiveStdDev;
+ if (effectiveStdDev <= 0) return;
+
+ using var readLock = _duckDb.AcquireReadLock();
+ using var connection = _duckDb.CreateConnection();
+ await connection.OpenAsync();
+
+ using var cmd = connection.CreateCommand();
+ cmd.CommandText = @"
+SELECT AVG(delta_cntr_value) AS avg_batch,
+ MAX(delta_cntr_value) AS peak_batch,
+ COUNT(*) AS sample_count
+FROM v_perfmon_stats
+WHERE server_id = $1 AND collection_time >= $2 AND collection_time <= $3
+AND counter_name = 'Batch Requests/sec'
+AND delta_cntr_value >= 0";
+
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
+
+ using var reader = await cmd.ExecuteReaderAsync();
+ if (!await reader.ReadAsync()) return;
+
+ var avgBatch = reader.IsDBNull(0) ? 0.0 : Convert.ToDouble(reader.GetValue(0));
+ var peakBatch = reader.IsDBNull(1) ? 0.0 : Convert.ToDouble(reader.GetValue(1));
+ var windowSamples = reader.IsDBNull(2) ? 0L : Convert.ToInt64(reader.GetValue(2));
+
+ if (windowSamples == 0) return;
+
+ var deviation = (peakBatch - baseline.Mean) / effectiveStdDev;
+ if (deviation < GetDeviationThreshold(MetricNames.BatchRequests)) return;
+
+ var metadata = new Dictionary
+ {
+ ["peak_batch_requests"] = peakBatch,
+ ["avg_batch_requests"] = avgBatch,
+ ["baseline_mean"] = baseline.Mean,
+ ["baseline_stddev"] = effectiveStdDev,
+ ["deviation_sigma"] = deviation,
+ ["baseline_samples"] = baseline.SampleCount,
+ ["window_samples"] = windowSamples
+ };
+ AddBaselineContext(metadata, baseline);
+
+ anomalies.Add(new Fact
+ {
+ Source = "anomaly",
+ Key = "ANOMALY_BATCH_REQUESTS",
+ Value = peakBatch,
+ ServerId = context.ServerId,
+ Metadata = metadata
+ });
+ }
+ catch (Exception ex)
+ {
+ AppLogger.Error("AnomalyDetector", $"Batch request anomaly detection failed: {ex.Message}");
+ }
+ }
+
+ ///
+ /// Detects session/connection count anomalies using z-score against time-bucketed baseline.
+ ///
+ private async Task DetectSessionAnomalies(AnalysisContext context, List anomalies)
+ {
+ try
+ {
+ var baseline = await _baselineProvider.GetBaselineAsync(
+ context.ServerId, MetricNames.SessionCount, context.TimeRangeStart);
+
+ if (baseline.SampleCount == 0) return;
+ var effectiveStdDev = baseline.EffectiveStdDev;
+ if (effectiveStdDev <= 0) return;
+
+ using var readLock = _duckDb.AcquireReadLock();
+ using var connection = _duckDb.CreateConnection();
+ await connection.OpenAsync();
+
+ using var cmd = connection.CreateCommand();
+ cmd.CommandText = @"
+WITH per_collection AS (
+ SELECT collection_time,
+ SUM(connection_count)::DOUBLE AS total_connections
+ FROM v_session_stats
+ WHERE server_id = $1 AND collection_time >= $2 AND collection_time <= $3
+ GROUP BY collection_time
+)
+SELECT AVG(total_connections) AS avg_connections,
+ MAX(total_connections) AS peak_connections,
+ COUNT(*) AS sample_count
+FROM per_collection";
+
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
+
+ using var reader = await cmd.ExecuteReaderAsync();
+ if (!await reader.ReadAsync()) return;
+
+ var avgConnections = reader.IsDBNull(0) ? 0.0 : Convert.ToDouble(reader.GetValue(0));
+ var peakConnections = reader.IsDBNull(1) ? 0.0 : Convert.ToDouble(reader.GetValue(1));
+ var windowSamples = reader.IsDBNull(2) ? 0L : Convert.ToInt64(reader.GetValue(2));
+
+ if (windowSamples == 0) return;
+
+ var deviation = (peakConnections - baseline.Mean) / effectiveStdDev;
+ if (deviation < GetDeviationThreshold(MetricNames.SessionCount)) return;
+
+ var metadata = new Dictionary
+ {
+ ["peak_connections"] = peakConnections,
+ ["avg_connections"] = avgConnections,
+ ["baseline_mean"] = baseline.Mean,
+ ["baseline_stddev"] = effectiveStdDev,
+ ["deviation_sigma"] = deviation,
+ ["baseline_samples"] = baseline.SampleCount,
+ ["window_samples"] = windowSamples
+ };
+ AddBaselineContext(metadata, baseline);
+
+ anomalies.Add(new Fact
+ {
+ Source = "anomaly",
+ Key = "ANOMALY_SESSION_SPIKE",
+ Value = peakConnections,
+ ServerId = context.ServerId,
+ Metadata = metadata
+ });
+ }
+ catch (Exception ex)
+ {
+ AppLogger.Error("AnomalyDetector", $"Session anomaly detection failed: {ex.Message}");
+ }
+ }
+
+ ///
+ /// Detects query duration aggregate anomalies using z-score against time-bucketed baseline.
+ /// Measures total elapsed time across all queries per collection interval.
+ ///
+ private async Task DetectQueryDurationAnomalies(AnalysisContext context, List anomalies)
+ {
+ try
+ {
+ var baseline = await _baselineProvider.GetBaselineAsync(
+ context.ServerId, MetricNames.QueryDuration, context.TimeRangeStart);
+
+ if (baseline.SampleCount == 0) return;
+ var effectiveStdDev = baseline.EffectiveStdDev;
+ if (effectiveStdDev <= 0) return;
+
+ using var readLock = _duckDb.AcquireReadLock();
+ using var connection = _duckDb.CreateConnection();
+ await connection.OpenAsync();
+
+ using var cmd = connection.CreateCommand();
+ cmd.CommandText = @"
+WITH per_collection AS (
+ SELECT collection_time,
+ SUM(delta_elapsed_time)::DOUBLE AS total_elapsed
+ FROM v_query_stats
+ WHERE server_id = $1 AND collection_time >= $2 AND collection_time <= $3
+ AND delta_execution_count > 0
+ AND delta_elapsed_time >= 0
+ GROUP BY collection_time
+)
+SELECT AVG(total_elapsed) AS avg_elapsed,
+ MAX(total_elapsed) AS peak_elapsed,
+ COUNT(*) AS sample_count
+FROM per_collection";
+
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
+
+ using var reader = await cmd.ExecuteReaderAsync();
+ if (!await reader.ReadAsync()) return;
+
+ var avgElapsed = reader.IsDBNull(0) ? 0.0 : Convert.ToDouble(reader.GetValue(0));
+ var peakElapsed = reader.IsDBNull(1) ? 0.0 : Convert.ToDouble(reader.GetValue(1));
+ var windowSamples = reader.IsDBNull(2) ? 0L : Convert.ToInt64(reader.GetValue(2));
+
+ if (windowSamples == 0) return;
+
+ var deviation = (peakElapsed - baseline.Mean) / effectiveStdDev;
+ if (deviation < GetDeviationThreshold(MetricNames.QueryDuration)) return;
+
+ var metadata = new Dictionary
+ {
+ ["peak_total_elapsed_us"] = peakElapsed,
+ ["avg_total_elapsed_us"] = avgElapsed,
+ ["baseline_mean"] = baseline.Mean,
+ ["baseline_stddev"] = effectiveStdDev,
+ ["deviation_sigma"] = deviation,
+ ["baseline_samples"] = baseline.SampleCount,
+ ["window_samples"] = windowSamples
+ };
+ AddBaselineContext(metadata, baseline);
+
+ anomalies.Add(new Fact
+ {
+ Source = "anomaly",
+ Key = "ANOMALY_QUERY_DURATION",
+ Value = peakElapsed,
+ ServerId = context.ServerId,
+ Metadata = metadata
+ });
+ }
+ catch (Exception ex)
+ {
+ AppLogger.Error("AnomalyDetector", $"Query duration anomaly detection failed: {ex.Message}");
+ }
+ }
+
+ ///
+ /// Detects memory utilization anomalies using z-score against time-bucketed baseline.
+ /// Lite-only — Dashboard does not collect memory metrics.
+ /// Measures total_server_memory_mb / target_server_memory_mb as memory pressure %.
+ ///
+ private async Task DetectMemoryAnomalies(AnalysisContext context, List anomalies)
+ {
+ try
+ {
+ var baseline = await _baselineProvider.GetBaselineAsync(
+ context.ServerId, MetricNames.Memory, context.TimeRangeStart);
+
+ if (baseline.SampleCount == 0) return;
+ var effectiveStdDev = baseline.EffectiveStdDev;
+ if (effectiveStdDev <= 0) return;
+
+ using var readLock = _duckDb.AcquireReadLock();
+ using var connection = _duckDb.CreateConnection();
+ await connection.OpenAsync();
+
+ using var cmd = connection.CreateCommand();
+ cmd.CommandText = @"
+SELECT AVG(total_server_memory_mb::DOUBLE / NULLIF(target_server_memory_mb::DOUBLE, 0) * 100) AS avg_pressure,
+ MAX(total_server_memory_mb::DOUBLE / NULLIF(target_server_memory_mb::DOUBLE, 0) * 100) AS peak_pressure,
+ COUNT(*) AS sample_count
+FROM v_memory_stats
+WHERE server_id = $1 AND collection_time >= $2 AND collection_time <= $3
+AND target_server_memory_mb > 0";
+
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.ServerId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeStart });
+ cmd.Parameters.Add(new DuckDBParameter { Value = context.TimeRangeEnd });
+
+ using var reader = await cmd.ExecuteReaderAsync();
+ if (!await reader.ReadAsync()) return;
+
+ var avgPressure = reader.IsDBNull(0) ? 0.0 : Convert.ToDouble(reader.GetValue(0));
+ var peakPressure = reader.IsDBNull(1) ? 0.0 : Convert.ToDouble(reader.GetValue(1));
+ var windowSamples = reader.IsDBNull(2) ? 0L : Convert.ToInt64(reader.GetValue(2));
+
+ if (windowSamples == 0) return;
+
+ var deviation = (peakPressure - baseline.Mean) / effectiveStdDev;
+ if (deviation < GetDeviationThreshold(MetricNames.Memory)) return;
+
+ var metadata = new Dictionary
+ {
+ ["peak_memory_pressure_pct"] = peakPressure,
+ ["avg_memory_pressure_pct"] = avgPressure,
+ ["baseline_mean"] = baseline.Mean,
+ ["baseline_stddev"] = effectiveStdDev,
+ ["deviation_sigma"] = deviation,
+ ["baseline_samples"] = baseline.SampleCount,
+ ["window_samples"] = windowSamples
+ };
+ AddBaselineContext(metadata, baseline);
+
+ anomalies.Add(new Fact
+ {
+ Source = "anomaly",
+ Key = "ANOMALY_MEMORY_PRESSURE",
+ Value = peakPressure,
+ ServerId = context.ServerId,
+ Metadata = metadata
+ });
+ }
+ catch (Exception ex)
+ {
+ AppLogger.Error("AnomalyDetector", $"Memory anomaly detection failed: {ex.Message}");
+ }
+ }
+}
diff --git a/Lite/Analysis/BaselineProvider.cs b/Lite/Analysis/BaselineProvider.cs
new file mode 100644
index 0000000..fc40d2b
--- /dev/null
+++ b/Lite/Analysis/BaselineProvider.cs
@@ -0,0 +1,544 @@
+using System;
+using System.Collections.Concurrent;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading.Tasks;
+using DuckDB.NET.Data;
+using PerformanceMonitorLite.Database;
+using PerformanceMonitorLite.Services;
+
+namespace PerformanceMonitorLite.Analysis;
+
+///
+/// Provides time-bucketed baselines (hour-of-day x day-of-week) computed from
+/// 30-day rolling history in DuckDB. Replaces the flat 24-hour lookback used
+/// by the previous anomaly detection implementation.
+///
+/// Each baseline bucket contains mean, stddev, and sample count for a metric
+/// at a specific (hour, day-of-week) combination. When a bucket has insufficient
+/// samples, the provider collapses to less-specific tiers:
+/// Full (hour+dow) -> Hour-only -> Flat (global mean/stddev)
+///
+/// Baselines are cached in memory with a 1-hour TTL to avoid redundant
+/// recomputation during rapid re-analysis.
+///
+public class BaselineProvider
+{
+ private readonly DuckDbInitializer _duckDb;
+
+ /// Rolling window for baseline computation.
+ private const int BaselineWindowDays = 30;
+
+ /// Collapse to hour-only when full bucket has fewer than this many samples.
+ private const int CollapseThreshold = 10;
+
+ /// Restore to full bucket when sample count reaches this level (hysteresis).
+ private const int RestoreThreshold = 15;
+
+ /// Cache TTL — baselines are recomputed after this interval.
+ public static TimeSpan CacheTtl { get; set; } = TimeSpan.FromHours(1);
+
+ private readonly ConcurrentDictionary _cache = new();
+
+ public BaselineProvider(DuckDbInitializer duckDb)
+ {
+ _duckDb = duckDb;
+ }
+
+ ///
+ /// Gets the baseline for a specific metric, server, and time bucket.
+ /// Returns the most specific bucket available, collapsing as needed.
+ ///
+ public async Task GetBaselineAsync(
+ int serverId, string metricName, DateTime analysisTime)
+ {
+ var hourOfDay = analysisTime.Hour;
+ var dayOfWeek = (int)analysisTime.DayOfWeek; // Sunday=0
+
+ var baselines = await GetOrComputeBaselinesAsync(serverId, metricName, analysisTime);
+ if (baselines == null || baselines.Count == 0)
+ return BaselineBucket.Empty;
+
+ // Try full bucket (hour + day-of-week)
+ var fullKey = (hourOfDay, dayOfWeek);
+ if (baselines.TryGetValue(fullKey, out var fullBucket) && fullBucket.SampleCount >= RestoreThreshold)
+ return fullBucket;
+
+ // If the full bucket exists but is below RestoreThreshold, still use it while it has
+ // at least CollapseThreshold samples. NOTE(review): no prior-tier state is tracked,
+ // so this is a static band (10-14 samples stay Full), not true hysteresis.
+ if (fullBucket != null && fullBucket.SampleCount >= CollapseThreshold)
+ return fullBucket;
+
+ // Collapse to hour-only: aggregate all days for this hour
+ var hourBuckets = baselines
+ .Where(kvp => kvp.Key.HourOfDay == hourOfDay)
+ .Select(kvp => kvp.Value)
+ .ToList();
+
+ if (hourBuckets.Count > 0)
+ {
+ var collapsed = CollapseToHourOnly(hourBuckets);
+ if (collapsed.SampleCount >= CollapseThreshold)
+ return collapsed;
+ }
+
+ // Collapse to flat: aggregate everything
+ var allBuckets = baselines.Values.ToList();
+ if (allBuckets.Count > 0)
+ {
+ var flat = CollapseToFlat(allBuckets);
+ if (flat.SampleCount >= 3) // Minimum viable baseline
+ return flat;
+ }
+
+ return BaselineBucket.Empty;
+ }
+
+ ///
+ /// Gets all baseline buckets for a metric/server. Used by UI for rendering
+ /// expected-range bands across all time slots.
+ ///
+ public async Task?> GetAllBaselinesAsync(
+ int serverId, string metricName, DateTime analysisTime)
+ {
+ return await GetOrComputeBaselinesAsync(serverId, metricName, analysisTime);
+ }
+
+ /// Forces cache eviction for a server — used during testing.
+ public void InvalidateCache(int serverId)
+ {
+ var keysToRemove = _cache.Keys.Where(k => k.StartsWith($"{serverId}:", StringComparison.Ordinal)).ToList();
+ foreach (var key in keysToRemove)
+ _cache.TryRemove(key, out _);
+ }
+
+ /// Forces full cache clear — used during testing.
+ public void ClearCache() => _cache.Clear();
+
+ private async Task?> GetOrComputeBaselinesAsync(
+ int serverId, string metricName, DateTime analysisTime)
+ {
+ var cacheKey = $"{serverId}:{metricName}";
+ var roundedHour = new DateTime(analysisTime.Year, analysisTime.Month, analysisTime.Day, analysisTime.Hour, 0, 0);
+
+ if (_cache.TryGetValue(cacheKey, out var cached) &&
+ cached.ComputedAt == roundedHour &&
+ (DateTime.UtcNow - cached.RealTime) < CacheTtl)
+ {
+ return cached.Buckets;
+ }
+
+ var buckets = await ComputeBaselinesAsync(serverId, metricName, analysisTime);
+
+ _cache[cacheKey] = new CachedBaseline
+ {
+ ComputedAt = roundedHour,
+ RealTime = DateTime.UtcNow,
+ Buckets = buckets
+ };
+
+ return buckets;
+ }
+
+ private async Task?> ComputeBaselinesAsync(
+ int serverId, string metricName, DateTime analysisTime)
+ {
+ var query = GetBaselineQuery(metricName);
+ if (query == null) return null;
+
+ var windowStart = analysisTime.AddDays(-BaselineWindowDays);
+
+ try
+ {
+ using var readLock = _duckDb.AcquireReadLock();
+ using var connection = _duckDb.CreateConnection();
+ await connection.OpenAsync();
+
+ using var cmd = connection.CreateCommand();
+ cmd.CommandText = query;
+ cmd.Parameters.Add(new DuckDBParameter { Value = serverId });
+ cmd.Parameters.Add(new DuckDBParameter { Value = windowStart });
+ cmd.Parameters.Add(new DuckDBParameter { Value = analysisTime });
+
+ var buckets = new Dictionary<(int, int), BaselineBucket>();
+
+ using var reader = await cmd.ExecuteReaderAsync();
+ while (await reader.ReadAsync())
+ {
+ var hour = Convert.ToInt32(reader.GetValue(0));
+ var dow = Convert.ToInt32(reader.GetValue(1));
+ var mean = reader.IsDBNull(2) ? 0.0 : Convert.ToDouble(reader.GetValue(2));
+ var stddev = reader.IsDBNull(3) ? 0.0 : Convert.ToDouble(reader.GetValue(3));
+ var count = reader.IsDBNull(4) ? 0L : Convert.ToInt64(reader.GetValue(4));
+
+ buckets[(hour, dow)] = new BaselineBucket
+ {
+ HourOfDay = hour,
+ DayOfWeek = dow,
+ Mean = mean,
+ StdDev = stddev,
+ SampleCount = count,
+ Tier = count >= RestoreThreshold ? BaselineTier.Full
+ : count >= CollapseThreshold ? BaselineTier.Full
+ : BaselineTier.HourOnly
+ };
+ }
+
+ return buckets;
+ }
+ catch (Exception ex)
+ {
+ AppLogger.Error("BaselineProvider", $"Failed to compute baselines for {metricName}: {ex.Message}");
+ return null;
+ }
+ }
+
+ private static string? GetBaselineQuery(string metricName)
+ {
+ // All queries return: hour_of_day, day_of_week, mean_val, stddev_val, sample_count
+ // Cumulative metrics (batch requests, wait stats, query duration) use CTEs for
+ // restart poisoning exclusion — exclude samples where value drops to near-zero
+ // when the prior sample was significantly higher.
+ // Multi-row-per-collection metrics (waits, sessions, queries) aggregate per
+ // collection_time first, then bucket by hour+dow.
+ return metricName switch
+ {
+ // Point-in-time metric — no restart exclusion needed
+ MetricNames.Cpu => @"
+SELECT EXTRACT(HOUR FROM collection_time)::INT AS hour_of_day,
+ EXTRACT(DOW FROM collection_time)::INT AS day_of_week,
+ AVG(sqlserver_cpu_utilization) AS mean_val,
+ STDDEV_SAMP(sqlserver_cpu_utilization) AS stddev_val,
+ COUNT(*) AS sample_count
+FROM v_cpu_utilization_stats
+WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+GROUP BY hour_of_day, day_of_week",
+
+ // Cumulative counter — restart exclusion via subquery with QUALIFY.
+ // Excludes samples where delta drops to 0 when prior sample was > 1000
+ // (restart signature for cumulative counters).
+ MetricNames.BatchRequests => @"
+SELECT EXTRACT(HOUR FROM collection_time)::INT AS hour_of_day,
+ EXTRACT(DOW FROM collection_time)::INT AS day_of_week,
+ AVG(delta_cntr_value) AS mean_val,
+ STDDEV_SAMP(delta_cntr_value) AS stddev_val,
+ COUNT(*) AS sample_count
+FROM (
+ SELECT collection_time, delta_cntr_value
+ FROM v_perfmon_stats
+ WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+ AND counter_name = 'Batch Requests/sec'
+ AND delta_cntr_value >= 0
+ QUALIFY NOT (delta_cntr_value = 0
+ AND COALESCE(LAG(delta_cntr_value) OVER (ORDER BY collection_time), 0) > 1000)
+)
+GROUP BY hour_of_day, day_of_week",
+
+ // Cumulative counter, multiple rows per collection (per wait type) —
+ // aggregate to total wait ms per collection first, then QUALIFY for restart exclusion
+ MetricNames.WaitStats => @"
+WITH per_collection AS (
+ SELECT collection_time,
+ SUM(delta_wait_time_ms) AS total_wait_ms
+ FROM v_wait_stats
+ WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+ AND delta_wait_time_ms >= 0
+ GROUP BY collection_time
+ QUALIFY NOT (total_wait_ms = 0
+ AND COALESCE(LAG(total_wait_ms) OVER (ORDER BY collection_time), 0) > 10000)
+)
+SELECT EXTRACT(HOUR FROM collection_time)::INT AS hour_of_day,
+ EXTRACT(DOW FROM collection_time)::INT AS day_of_week,
+ AVG(total_wait_ms) AS mean_val,
+ STDDEV_SAMP(total_wait_ms) AS stddev_val,
+ COUNT(*) AS sample_count
+FROM per_collection
+GROUP BY hour_of_day, day_of_week",
+
+ // Point-in-time, multiple rows per collection (per program_name) —
+ // aggregate to total connections per collection first
+ MetricNames.SessionCount => @"
+WITH per_collection AS (
+ SELECT collection_time,
+ SUM(connection_count) AS total_connections
+ FROM v_session_stats
+ WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+ GROUP BY collection_time
+)
+SELECT EXTRACT(HOUR FROM collection_time)::INT AS hour_of_day,
+ EXTRACT(DOW FROM collection_time)::INT AS day_of_week,
+ AVG(total_connections) AS mean_val,
+ STDDEV_SAMP(total_connections) AS stddev_val,
+ COUNT(*) AS sample_count
+FROM per_collection
+GROUP BY hour_of_day, day_of_week",
+
+ // Cumulative (plan cache), multiple rows per collection (per query) —
+ // use delta columns, aggregate total elapsed per collection, QUALIFY for restart exclusion
+ MetricNames.QueryDuration => @"
+WITH per_collection AS (
+ SELECT collection_time,
+ SUM(delta_elapsed_time) AS total_elapsed
+ FROM v_query_stats
+ WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+ AND delta_execution_count > 0
+ AND delta_elapsed_time >= 0
+ GROUP BY collection_time
+ QUALIFY NOT (total_elapsed = 0
+ AND COALESCE(LAG(total_elapsed) OVER (ORDER BY collection_time), 0) > 100000)
+)
+SELECT EXTRACT(HOUR FROM collection_time)::INT AS hour_of_day,
+ EXTRACT(DOW FROM collection_time)::INT AS day_of_week,
+ AVG(total_elapsed) AS mean_val,
+ STDDEV_SAMP(total_elapsed) AS stddev_val,
+ COUNT(*) AS sample_count
+FROM per_collection
+GROUP BY hour_of_day, day_of_week",
+
+ // Point-in-time metric — no restart exclusion needed
+ MetricNames.IoLatency => @"
+SELECT EXTRACT(HOUR FROM collection_time)::INT AS hour_of_day,
+ EXTRACT(DOW FROM collection_time)::INT AS day_of_week,
+ AVG(delta_stall_read_ms * 1.0 / NULLIF(delta_reads, 0)) AS mean_val,
+ STDDEV_SAMP(delta_stall_read_ms * 1.0 / NULLIF(delta_reads, 0)) AS stddev_val,
+ COUNT(*) AS sample_count
+FROM v_file_io_stats
+WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+AND (delta_reads > 0 OR delta_writes > 0)
+GROUP BY hour_of_day, day_of_week",
+
+ // Event-based — mean = events per day for this bucket, sample_count = distinct days observed.
+ // No restart exclusion needed (event counts, not cumulative).
+ MetricNames.Blocking => @"
+SELECT EXTRACT(HOUR FROM collection_time)::INT AS hour_of_day,
+ EXTRACT(DOW FROM collection_time)::INT AS day_of_week,
+ COUNT(*)::DOUBLE / GREATEST(COUNT(DISTINCT collection_time::DATE), 1) AS mean_val,
+ 0::DOUBLE AS stddev_val,
+ COUNT(DISTINCT collection_time::DATE) AS sample_count
+FROM v_blocked_process_reports
+WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+GROUP BY hour_of_day, day_of_week",
+
+ // Event-based — same approach as blocking
+ MetricNames.Deadlock => @"
+SELECT EXTRACT(HOUR FROM collection_time)::INT AS hour_of_day,
+ EXTRACT(DOW FROM collection_time)::INT AS day_of_week,
+ COUNT(*)::DOUBLE / GREATEST(COUNT(DISTINCT collection_time::DATE), 1) AS mean_val,
+ 0::DOUBLE AS stddev_val,
+ COUNT(DISTINCT collection_time::DATE) AS sample_count
+FROM v_deadlocks
+WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+GROUP BY hour_of_day, day_of_week",
+
+ // Point-in-time metric (memory pressure %) — no restart exclusion needed
+ MetricNames.Memory => @"
+SELECT EXTRACT(HOUR FROM collection_time)::INT AS hour_of_day,
+ EXTRACT(DOW FROM collection_time)::INT AS day_of_week,
+ AVG(total_server_memory_mb::DOUBLE / NULLIF(target_server_memory_mb::DOUBLE, 0) * 100) AS mean_val,
+ STDDEV_SAMP(total_server_memory_mb::DOUBLE / NULLIF(target_server_memory_mb::DOUBLE, 0) * 100) AS stddev_val,
+ COUNT(*) AS sample_count
+FROM v_memory_stats
+WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+AND target_server_memory_mb > 0
+GROUP BY hour_of_day, day_of_week",
+
+ // ── Chart-unit baselines (for UI bands — units match what the chart displays) ──
+
+ // Buffer pool MB (chart shows this, not pressure %)
+ MetricNames.MemoryBufferPoolMb => @"
+SELECT EXTRACT(HOUR FROM collection_time)::INT AS hour_of_day,
+ EXTRACT(DOW FROM collection_time)::INT AS day_of_week,
+ AVG(buffer_pool_mb::DOUBLE) AS mean_val,
+ STDDEV_SAMP(buffer_pool_mb::DOUBLE) AS stddev_val,
+ COUNT(*) AS sample_count
+FROM v_memory_stats
+WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+AND buffer_pool_mb > 0
+GROUP BY hour_of_day, day_of_week",
+
+ // Wait ms per second (chart shows this, not total ms per collection)
+ MetricNames.WaitMsPerSec => @"
+WITH per_collection AS (
+ SELECT collection_time,
+ SUM(delta_wait_time_ms)::DOUBLE AS total_wait_ms,
+ date_diff('second', LAG(collection_time) OVER (ORDER BY collection_time), collection_time) AS interval_sec
+ FROM v_wait_stats
+ WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+ AND delta_wait_time_ms >= 0
+ GROUP BY collection_time
+),
+with_rate AS (
+ SELECT collection_time,
+ CASE WHEN interval_sec > 0 THEN total_wait_ms / interval_sec ELSE 0 END AS ms_per_sec
+ FROM per_collection
+ WHERE interval_sec IS NOT NULL
+ QUALIFY NOT (ms_per_sec = 0
+ AND COALESCE(LAG(ms_per_sec) OVER (ORDER BY collection_time), 0) > 100)
+)
+SELECT EXTRACT(HOUR FROM collection_time)::INT AS hour_of_day,
+ EXTRACT(DOW FROM collection_time)::INT AS day_of_week,
+ AVG(ms_per_sec) AS mean_val,
+ STDDEV_SAMP(ms_per_sec) AS stddev_val,
+ COUNT(*) AS sample_count
+FROM with_rate
+GROUP BY hour_of_day, day_of_week",
+
+ // Blocking events per minute (chart shows event bars bucketed by minute)
+ MetricNames.BlockingPerMinute => @"
+WITH per_minute AS (
+ SELECT DATE_TRUNC('minute', collection_time) AS minute_bucket,
+ COUNT(*)::DOUBLE AS event_count
+ FROM v_blocked_process_reports
+ WHERE server_id = $1 AND collection_time >= $2 AND collection_time < $3
+ GROUP BY minute_bucket
+)
+SELECT EXTRACT(HOUR FROM minute_bucket)::INT AS hour_of_day,
+ EXTRACT(DOW FROM minute_bucket)::INT AS day_of_week,
+ AVG(event_count) AS mean_val,
+ STDDEV_SAMP(event_count) AS stddev_val,
+ COUNT(*) AS sample_count
+FROM per_minute
+GROUP BY hour_of_day, day_of_week",
+
+ _ => null
+ };
+ }
+
+ ///
+ /// Collapses multiple day-of-week buckets for the same hour into a single
+ /// hour-only bucket using pooled statistics.
+ ///
+ private static BaselineBucket CollapseToHourOnly(List hourBuckets)
+ {
+ var totalSamples = hourBuckets.Sum(b => b.SampleCount);
+ if (totalSamples == 0)
+ return BaselineBucket.Empty;
+
+ // Weighted mean across all day-of-week buckets for this hour
+ var weightedMean = hourBuckets.Sum(b => b.Mean * b.SampleCount) / totalSamples;
+
+ // Pooled standard deviation
+ var pooledVariance = PoolVariance(hourBuckets, weightedMean);
+
+ return new BaselineBucket
+ {
+ HourOfDay = hourBuckets[0].HourOfDay,
+ DayOfWeek = -1, // Indicates hour-only
+ Mean = weightedMean,
+ StdDev = Math.Sqrt(pooledVariance),
+ SampleCount = totalSamples,
+ Tier = BaselineTier.HourOnly
+ };
+ }
+
+ ///
+ /// Collapses all buckets into a single flat baseline (equivalent to old 24h behavior).
+ ///
+ private static BaselineBucket CollapseToFlat(List allBuckets)
+ {
+ var totalSamples = allBuckets.Sum(b => b.SampleCount);
+ if (totalSamples == 0)
+ return BaselineBucket.Empty;
+
+ var weightedMean = allBuckets.Sum(b => b.Mean * b.SampleCount) / totalSamples;
+ var pooledVariance = PoolVariance(allBuckets, weightedMean);
+
+ return new BaselineBucket
+ {
+ HourOfDay = -1,
+ DayOfWeek = -1,
+ Mean = weightedMean,
+ StdDev = Math.Sqrt(pooledVariance),
+ SampleCount = totalSamples,
+ Tier = BaselineTier.Flat
+ };
+ }
+
+ ///
+ /// Computes pooled variance from multiple buckets, accounting for both
+ /// within-bucket variance and between-bucket mean differences.
+ ///
+ private static double PoolVariance(List buckets, double grandMean)
+ {
+ var totalSamples = buckets.Sum(b => b.SampleCount);
+ if (totalSamples <= 1) return 0;
+
+ double totalSumSq = 0;
+ foreach (var b in buckets)
+ {
+ if (b.SampleCount <= 0) continue;
+ // Within-bucket variance contribution
+ totalSumSq += (b.StdDev * b.StdDev) * (b.SampleCount - 1);
+ // Between-bucket mean difference contribution
+ totalSumSq += b.SampleCount * (b.Mean - grandMean) * (b.Mean - grandMean);
+ }
+
+ return totalSumSq / (totalSamples - 1);
+ }
+
+ private class CachedBaseline
+ {
+ public DateTime ComputedAt { get; init; }
+ public DateTime RealTime { get; init; }
+ public Dictionary<(int HourOfDay, int DayOfWeek), BaselineBucket>? Buckets { get; init; }
+ }
+}
+
+///
+/// Represents the computed baseline statistics for a single time bucket.
+///
+public class BaselineBucket
+{
+ public int HourOfDay { get; init; }
+ public int DayOfWeek { get; init; }
+ public double Mean { get; init; }
+ public double StdDev { get; init; }
+ public long SampleCount { get; init; }
+ public BaselineTier Tier { get; init; }
+
+ public static BaselineBucket Empty => new()
+ {
+ HourOfDay = -1, DayOfWeek = -1, Mean = 0, StdDev = 0,
+ SampleCount = 0, Tier = BaselineTier.Flat
+ };
+
+ ///
+ /// Returns the effective stddev with a proportional minimum floor to prevent
+ /// division-by-zero in z-score calculations. When both mean and stddev are 0
+ /// (zero activity), returns 0 — callers should skip scoring.
+ ///
+ public double EffectiveStdDev
+ {
+ get
+ {
+ if (Mean == 0 && StdDev <= 0) return 0; // Zero activity — skip scoring
+ return Math.Max(StdDev, Mean * 0.01);
+ }
+ }
+}
+
+public enum BaselineTier
+{
+ Full, // hour + day-of-week (168 buckets)
+ HourOnly, // hour only (24 buckets)
+ Flat // global mean/stddev
+}
+
+/// Metric name constants used as baseline cache keys.
+public static class MetricNames
+{
+ public const string Cpu = "cpu";
+ public const string BatchRequests = "batch_requests";
+ public const string WaitStats = "wait_stats";
+ public const string SessionCount = "session_count";
+ public const string QueryDuration = "query_duration";
+ public const string IoLatency = "io_latency";
+ public const string Blocking = "blocking";
+ public const string Deadlock = "deadlock";
+ public const string Memory = "memory";
+
+ // Chart-unit metrics (for UI bands — units match what the chart displays)
+ public const string MemoryBufferPoolMb = "memory_buffer_pool_mb";
+ public const string WaitMsPerSec = "wait_ms_per_sec";
+ public const string BlockingPerMinute = "blocking_per_minute";
+}
diff --git a/Lite/Analysis/FactScorer.cs b/Lite/Analysis/FactScorer.cs
index 1ec47f7..e42b4e9 100644
--- a/Lite/Analysis/FactScorer.cs
+++ b/Lite/Analysis/FactScorer.cs
@@ -308,8 +308,13 @@ private static double ScoreBadActorFact(Fact fact)
///
private static double ScoreAnomalyFact(Fact fact)
{
- if (fact.Key.StartsWith("ANOMALY_CPU_SPIKE", StringComparison.OrdinalIgnoreCase) || fact.Key.StartsWith("ANOMALY_READ_LATENCY", StringComparison.OrdinalIgnoreCase)
- || fact.Key.StartsWith("ANOMALY_WRITE_LATENCY", StringComparison.OrdinalIgnoreCase))
+ if (fact.Key.StartsWith("ANOMALY_CPU_SPIKE", StringComparison.OrdinalIgnoreCase)
+ || fact.Key.StartsWith("ANOMALY_READ_LATENCY", StringComparison.OrdinalIgnoreCase)
+ || fact.Key.StartsWith("ANOMALY_WRITE_LATENCY", StringComparison.OrdinalIgnoreCase)
+ || fact.Key.StartsWith("ANOMALY_BATCH_REQUESTS", StringComparison.OrdinalIgnoreCase)
+ || fact.Key.StartsWith("ANOMALY_SESSION_SPIKE", StringComparison.OrdinalIgnoreCase)
+ || fact.Key.StartsWith("ANOMALY_QUERY_DURATION", StringComparison.OrdinalIgnoreCase)
+ || fact.Key.StartsWith("ANOMALY_MEMORY_PRESSURE", StringComparison.OrdinalIgnoreCase))
{
// Deviation-based scoring: 2σ = 0.5, 4σ = 1.0
var deviation = fact.Metadata.GetValueOrDefault("deviation_sigma");
diff --git a/Lite/Analysis/FindingStore.cs b/Lite/Analysis/FindingStore.cs
index 6724445..7611e51 100644
--- a/Lite/Analysis/FindingStore.cs
+++ b/Lite/Analysis/FindingStore.cs
@@ -60,7 +60,8 @@ public async Task> SaveFindingsAsync(
RootFactValue = story.RootFactValue,
LeafFactKey = story.LeafFactKey,
LeafFactValue = story.LeafFactValue,
- FactCount = story.FactCount
+ FactCount = story.FactCount,
+ RootFactMetadata = story.RootFactMetadata
};
await InsertFindingAsync(finding);
diff --git a/Lite/Analysis/InferenceEngine.cs b/Lite/Analysis/InferenceEngine.cs
index 4ef4dc8..fdc7985 100644
--- a/Lite/Analysis/InferenceEngine.cs
+++ b/Lite/Analysis/InferenceEngine.cs
@@ -150,7 +150,8 @@ private static AnalysisStory BuildStory(List path, Dictionary();
foreach (var col in grid.Columns)
- headers.Add(CsvEscape(col.Header?.ToString() ?? ""));
+ headers.Add(CsvEscape(DataGridClipboardBehavior.GetHeaderText(col)));
sb.AppendLine(string.Join(",", headers));
foreach (var item in grid.Items)
diff --git a/Lite/Controls/CorrelatedTimelineLanesControl.xaml b/Lite/Controls/CorrelatedTimelineLanesControl.xaml
new file mode 100644
index 0000000..f0f1871
--- /dev/null
+++ b/Lite/Controls/CorrelatedTimelineLanesControl.xaml
@@ -0,0 +1,80 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Lite/Controls/CorrelatedTimelineLanesControl.xaml.cs b/Lite/Controls/CorrelatedTimelineLanesControl.xaml.cs
new file mode 100644
index 0000000..d39775a
--- /dev/null
+++ b/Lite/Controls/CorrelatedTimelineLanesControl.xaml.cs
@@ -0,0 +1,540 @@
+/*
+ * Copyright (c) 2026 Erik Darling, Darling Data LLC
+ *
+ * This file is part of the SQL Server Performance Monitor Lite.
+ *
+ * Licensed under the MIT License. See LICENSE file in the project root for full license information.
+ *
+ * SYNC WARNING: Dashboard has a matching copy at Dashboard/Controls/CorrelatedTimelineLanesControl.xaml.cs.
+ * Changes here must be mirrored there.
+ */
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading.Tasks;
+using System.Windows;
+using System.Windows.Controls;
+using PerformanceMonitorLite.Analysis;
+using PerformanceMonitorLite.Helpers;
+using PerformanceMonitorLite.Services;
+
+namespace PerformanceMonitorLite.Controls;
+
+public partial class CorrelatedTimelineLanesControl : UserControl
+{
+ private LocalDataService? _dataService;
+ private int _serverId;
+ private CorrelatedCrosshairManager? _crosshairManager;
+ private bool _isRefreshing;
+
+ public CorrelatedTimelineLanesControl()
+ {
+ InitializeComponent();
+ // Dispose the crosshair manager when the control leaves the visual tree.
+ Unloaded += (_, _) => _crosshairManager?.Dispose();
+ }
+
+ /// <summary>
+ /// Initializes the control with the data service and server ID.
+ /// Must be called before <see cref="RefreshAsync"/>.
+ /// </summary>
+ public void Initialize(LocalDataService dataService, int serverId)
+ {
+ _dataService = dataService;
+ _serverId = serverId;
+
+ var charts = new[] { CpuChart, WaitStatsChart, BlockingChart, MemoryChart, FileIoChart };
+ foreach (var chart in charts)
+ {
+ ApplyTheme(chart);
+ // Disable zoom/pan/drag but keep mouse events for crosshair
+ chart.UserInputProcessor.UserActionResponses.Clear();
+ }
+
+ // One crosshair lane per chart; the label/unit pairs feed the shared tooltip.
+ _crosshairManager = new CorrelatedCrosshairManager();
+ _crosshairManager.AddLane(CpuChart, "CPU", "%");
+ _crosshairManager.AddLane(WaitStatsChart, "Wait Stats", "ms/sec");
+ _crosshairManager.AddLane(BlockingChart, "Blocking", "events");
+ _crosshairManager.AddLane(MemoryChart, "Buffer Pool", "MB");
+ _crosshairManager.AddLane(FileIoChart, "I/O Latency", "ms");
+ }
+
+ /// <summary>
+ /// Refreshes all lane data for the given time range. Fetches the five lane
+ /// series plus baseline buckets in parallel; lanes whose fetch task did not
+ /// complete successfully fall back to an empty placeholder. When
+ /// <paramref name="comparisonRange"/> is supplied, the reference period is
+ /// fetched and overlaid as time-shifted ghost lines.
+ /// Re-entrant calls are ignored while a refresh is in flight.
+ /// </summary>
+ public async Task RefreshAsync(int hoursBack, DateTime? fromDate, DateTime? toDate,
+ (DateTime From, DateTime To)? comparisonRange = null)
+ {
+ if (_dataService == null || _isRefreshing) return;
+ _isRefreshing = true;
+
+ try
+ {
+ _crosshairManager?.PrepareForRefresh();
+
+ var cpuTask = _dataService.GetCpuUtilizationAsync(_serverId, hoursBack, fromDate, toDate);
+ var waitTask = _dataService.GetTotalWaitTrendAsync(_serverId, hoursBack, fromDate, toDate);
+ var blockingTask = _dataService.GetBlockingTrendAsync(_serverId, hoursBack, fromDate, toDate);
+ var deadlockTask = _dataService.GetDeadlockTrendAsync(_serverId, hoursBack, fromDate, toDate);
+ var memoryTask = _dataService.GetMemoryTrendAsync(_serverId, hoursBack, fromDate, toDate);
+ var fileIoTask = _dataService.GetFileIoLatencyTrendAsync(_serverId, hoursBack, fromDate, toDate);
+
+ // Fetch baselines for band rendering — chart-unit-matched metrics
+ var referenceTime = fromDate ?? DateTime.UtcNow.AddHours(-hoursBack);
+ var cpuBaselineTask = _dataService.GetBaselineForLaneAsync(_serverId, MetricNames.Cpu, referenceTime);
+ var waitBaselineTask = _dataService.GetBaselineForLaneAsync(_serverId, MetricNames.WaitMsPerSec, referenceTime);
+ var ioBaselineTask = _dataService.GetBaselineForLaneAsync(_serverId, MetricNames.IoLatency, referenceTime);
+ var blockingBaselineTask = _dataService.GetBaselineForLaneAsync(_serverId, MetricNames.BlockingPerMinute, referenceTime);
+
+ try
+ {
+ await Task.WhenAll(cpuTask, waitTask, blockingTask, deadlockTask, memoryTask, fileIoTask,
+ cpuBaselineTask, waitBaselineTask, ioBaselineTask, blockingBaselineTask);
+ }
+ catch (Exception ex)
+ {
+ // Best-effort: individual lane tasks are checked with IsCompletedSuccessfully below.
+ AppLogger.Info("CorrelatedLanes", $"Data fetch failed: {ex.Message}");
+ }
+
+ var cpuBaseline = cpuBaselineTask.IsCompletedSuccessfully ? cpuBaselineTask.Result : null;
+ var waitBaseline = waitBaselineTask.IsCompletedSuccessfully ? waitBaselineTask.Result : null;
+ var ioBaseline = ioBaselineTask.IsCompletedSuccessfully ? ioBaselineTask.Result : null;
+ var blockingBaseline = blockingBaselineTask.IsCompletedSuccessfully ? blockingBaselineTask.Result : null;
+
+ var utcOffset = ServerTimeHelper.UtcOffsetMinutes;
+
+ // minAnomalyValue: absolute floor below which dots/arrows are suppressed even if outside band.
+ // Prevents "1% CPU above 0.5% baseline" false alarms on idle servers.
+ if (cpuTask.IsCompletedSuccessfully)
+ UpdateLane(CpuChart, "CPU %",
+ cpuTask.Result.Select(d => (d.SampleTime.ToOADate(), (double)d.SqlServerCpu)).ToList(),
+ "#4FC3F7", 0, 105, cpuBaseline, minAnomalyValue: 10);
+ else
+ ShowEmpty(CpuChart, "CPU %");
+
+ if (waitTask.IsCompletedSuccessfully)
+ UpdateLane(WaitStatsChart, "Wait ms/sec",
+ waitTask.Result.Select(d => (d.CollectionTime.AddMinutes(utcOffset).ToOADate(), d.WaitTimeMsPerSecond)).ToList(),
+ "#FFB74D", baseline: waitBaseline, minAnomalyValue: 100);
+ else
+ ShowEmpty(WaitStatsChart, "Wait ms/sec");
+
+ {
+ var blockingData = blockingTask.IsCompletedSuccessfully
+ ? blockingTask.Result.Select(d => (d.Time.AddMinutes(utcOffset).ToOADate(), (double)d.Count)).ToList()
+ : new List<(double, double)>();
+ var deadlockData = deadlockTask.IsCompletedSuccessfully
+ ? deadlockTask.Result.Select(d => (d.Time.AddMinutes(utcOffset).ToOADate(), (double)d.Count)).ToList()
+ : new List<(double, double)>();
+ UpdateBlockingLane(blockingData, deadlockData, blockingBaseline);
+ }
+
+ if (memoryTask.IsCompletedSuccessfully)
+ UpdateLane(MemoryChart, "Buffer Pool MB",
+ memoryTask.Result.Select(d => (d.CollectionTime.AddMinutes(utcOffset).ToOADate(), d.BufferPoolMb)).ToList(),
+ "#CE93D8");
+ else
+ ShowEmpty(MemoryChart, "Memory MB");
+
+ if (fileIoTask.IsCompletedSuccessfully)
+ {
+ // Collapse per-file rows into one average read-latency point per collection time.
+ var ioGrouped = fileIoTask.Result
+ .GroupBy(d => d.CollectionTime)
+ .OrderBy(g => g.Key)
+ .Select(g => (g.Key.AddMinutes(utcOffset).ToOADate(), g.Average(x => x.AvgReadLatencyMs)))
+ .ToList();
+ UpdateLane(FileIoChart, "I/O ms", ioGrouped, "#81C784", baseline: ioBaseline, minAnomalyValue: 2);
+ }
+ else
+ ShowEmpty(FileIoChart, "I/O ms");
+
+ // Comparison overlay — fetch reference period data and render as ghost lines
+ if (comparisonRange.HasValue)
+ {
+ var refFrom = comparisonRange.Value.From;
+ var refTo = comparisonRange.Value.To;
+ // Time shift: offset to align reference data with current chart X axis
+ var timeShift = (fromDate ?? DateTime.UtcNow.AddHours(-hoursBack)) - refFrom;
+
+ var refCpuTask = _dataService.GetCpuUtilizationAsync(_serverId, 0, refFrom, refTo);
+ var refWaitTask = _dataService.GetTotalWaitTrendAsync(_serverId, 0, refFrom, refTo);
+ var refBlockingTask = _dataService.GetBlockingTrendAsync(_serverId, 0, refFrom, refTo);
+ var refMemoryTask = _dataService.GetMemoryTrendAsync(_serverId, 0, refFrom, refTo);
+ var refIoTask = _dataService.GetFileIoLatencyTrendAsync(_serverId, 0, refFrom, refTo);
+
+ try { await Task.WhenAll(refCpuTask, refWaitTask, refBlockingTask, refMemoryTask, refIoTask); }
+ catch (Exception ex) { AppLogger.Info("CorrelatedLanes", $"Comparison fetch failed: {ex.Message}"); }
+
+ // FIX: guard .Result — accessing Result on a faulted task throws, and the
+ // exception above was deliberately swallowed. -1 in the log means "fetch failed".
+ var refCpuCount = refCpuTask.IsCompletedSuccessfully ? refCpuTask.Result?.Count ?? 0 : -1;
+ var refWaitCount = refWaitTask.IsCompletedSuccessfully ? refWaitTask.Result?.Count ?? 0 : -1;
+ AppLogger.Info("CorrelatedLanes",
+ $"Comparison: refFrom={refFrom:o}, refTo={refTo:o}, shift={timeShift.TotalHours:F1}h, " +
+ $"cpuRows={refCpuCount}, waitRows={refWaitCount}");
+
+ if (refCpuTask.IsCompletedSuccessfully)
+ AddGhostLine(CpuChart, refCpuTask.Result
+ .Select(d => (d.SampleTime.Add(timeShift).ToOADate(), (double)d.SqlServerCpu)).ToList(), "#4FC3F7");
+
+ if (refWaitTask.IsCompletedSuccessfully)
+ AddGhostLine(WaitStatsChart, refWaitTask.Result
+ .Select(d => (d.CollectionTime.AddMinutes(utcOffset).Add(timeShift).ToOADate(), d.WaitTimeMsPerSecond)).ToList(), "#FFB74D");
+
+ if (refBlockingTask.IsCompletedSuccessfully)
+ {
+ var refBlocking = refBlockingTask.Result
+ .Select(d => (d.Time.AddMinutes(utcOffset).Add(timeShift).ToOADate(), (double)d.Count)).ToList();
+ if (refBlocking.Count > 0)
+ AddGhostLine(BlockingChart, refBlocking, "#E57373");
+ }
+
+ if (refMemoryTask.IsCompletedSuccessfully)
+ AddGhostLine(MemoryChart, refMemoryTask.Result
+ .Select(d => (d.CollectionTime.AddMinutes(utcOffset).Add(timeShift).ToOADate(), d.BufferPoolMb)).ToList(), "#CE93D8");
+
+ if (refIoTask.IsCompletedSuccessfully)
+ {
+ var refIo = refIoTask.Result
+ .GroupBy(d => d.CollectionTime)
+ .OrderBy(g => g.Key)
+ .Select(g => (g.Key.AddMinutes(utcOffset).Add(timeShift).ToOADate(), g.Average(x => x.AvgReadLatencyMs)))
+ .ToList();
+ AddGhostLine(FileIoChart, refIo, "#81C784");
+ }
+
+ // Register reference data with crosshair manager for tooltip
+ _crosshairManager?.SetComparisonLabel(ComparisonLabel(comparisonRange.Value, fromDate, hoursBack));
+ }
+
+ _crosshairManager?.ReattachVLines();
+ SyncXAxes(hoursBack, fromDate, toDate, utcOffset);
+ }
+ finally
+ {
+ _isRefreshing = false;
+ }
+ }
+
+ /// <summary>
+ /// Renders the blocking/deadlock lane as two overlaid bar series (blocking red,
+ /// deadlocks amber) plus an optional ±2σ baseline band. Crosshair series are
+ /// registered even when both inputs are empty so the tooltip stays consistent.
+ /// </summary>
+ private void UpdateBlockingLane(List<(double Time, double Value)> blockingData,
+ List<(double Time, double Value)> deadlockData, BaselineBucket? baseline = null)
+ {
+ ClearChart(BlockingChart);
+ ApplyTheme(BlockingChart);
+
+ // Register blocking and deadlock as separate named series for the tooltip
+ var blockTimes = blockingData.Select(d => d.Time).ToArray();
+ var blockValues = blockingData.Select(d => d.Value).ToArray();
+ var deadTimes = deadlockData.Select(d => d.Time).ToArray();
+ var deadValues = deadlockData.Select(d => d.Value).ToArray();
+
+ // First series clears any previous data
+ _crosshairManager?.SetLaneData(BlockingChart, blockTimes, blockValues, isEventBased: true);
+ // Rename the auto-created series and add the second
+ _crosshairManager?.AddLaneSeries(BlockingChart, "Deadlocks", "events",
+ deadTimes, deadValues, isEventBased: true);
+
+ if (blockingData.Count == 0 && deadlockData.Count == 0)
+ {
+ ShowEmpty(BlockingChart, "Block/Dead");
+ return;
+ }
+
+ // X axis is in OADate units (days) — 30.0/86400.0 is a 30-second-wide bar.
+ double barWidth = 30.0 / 86400.0;
+ double maxCount = 0;
+
+ // Blocking bars — red
+ if (blockingData.Count > 0)
+ {
+ var bars = blockingData.Select(d => new ScottPlot.Bar
+ {
+ Position = d.Time,
+ Value = d.Value,
+ Size = barWidth,
+ FillColor = ScottPlot.Color.FromHex("#E57373"),
+ LineWidth = 0
+ }).ToArray();
+ BlockingChart.Plot.Add.Bars(bars);
+ maxCount = Math.Max(maxCount, blockingData.Max(d => d.Value));
+ }
+
+ // Deadlock bars — yellow/amber, slightly narrower so both are visible
+ if (deadlockData.Count > 0)
+ {
+ var bars = deadlockData.Select(d => new ScottPlot.Bar
+ {
+ Position = d.Time,
+ Value = d.Value,
+ Size = barWidth * 0.6,
+ FillColor = ScottPlot.Color.FromHex("#FFD54F"),
+ LineWidth = 0
+ }).ToArray();
+ BlockingChart.Plot.Add.Bars(bars);
+ maxCount = Math.Max(maxCount, deadlockData.Max(d => d.Value));
+ }
+
+ // Baseline band for blocking
+ if (baseline != null && baseline.SampleCount > 0 && baseline.EffectiveStdDev > 0)
+ {
+ var upper = baseline.Mean + 2 * baseline.EffectiveStdDev;
+ var lower = Math.Max(0, baseline.Mean - 2 * baseline.EffectiveStdDev);
+
+ _crosshairManager?.SetLaneBaseline(BlockingChart, lower, upper, isEventBased: true);
+
+ var band = BlockingChart.Plot.Add.HorizontalSpan(lower, upper);
+ band.FillStyle.Color = ScottPlot.Color.FromHex("#E57373").WithAlpha(25);
+ band.LineStyle.Width = 0;
+
+ var meanLine = BlockingChart.Plot.Add.HorizontalLine(baseline.Mean);
+ meanLine.Color = ScottPlot.Color.FromHex("#E57373").WithAlpha(60);
+ meanLine.LinePattern = ScottPlot.LinePattern.Dashed;
+ meanLine.LineWidth = 1;
+ }
+
+ // DateTimeTicksBottom resets tick styling, so axis colors are reapplied after it.
+ BlockingChart.Plot.Axes.DateTimeTicksBottom();
+ BlockingChart.Plot.Axes.Bottom.TickLabelStyle.IsVisible = false;
+ ReapplyAxisColors(BlockingChart);
+
+ BlockingChart.Plot.Title("");
+ BlockingChart.Plot.YLabel("");
+ BlockingChart.Plot.Legend.IsVisible = false;
+ BlockingChart.Plot.Axes.Margins(bottom: 0);
+ // Keep a minimum Y range of 2 so single events don't fill the lane.
+ BlockingChart.Plot.Axes.SetLimitsY(0, Math.Max(maxCount * 1.3, 2));
+
+ BlockingChart.Refresh();
+ }
+
+ /// <summary>
+ /// Renders a single metric lane: optional ±2σ baseline band and mean line (drawn
+ /// first, behind the data), red dots on anomalous samples, then the metric line.
+ /// <paramref name="minAnomalyValue"/> is an absolute floor applied only to
+ /// above-band anomalies; below-band samples are always flagged (NOTE(review):
+ /// asymmetry appears intentional but confirm drops below baseline should ignore the floor).
+ /// </summary>
+ private void UpdateLane(ScottPlot.WPF.WpfPlot chart, string title,
+ List<(double Time, double Value)> data, string colorHex,
+ double? yMin = null, double? yMax = null, BaselineBucket? baseline = null,
+ double minAnomalyValue = 0)
+ {
+ ClearChart(chart);
+ ApplyTheme(chart);
+
+ if (data.Count == 0)
+ {
+ ShowEmpty(chart, title);
+ return;
+ }
+
+ var times = data.Select(d => d.Time).ToArray();
+ var values = data.Select(d => d.Value).ToArray();
+
+ // Render baseline band FIRST (behind the data line)
+ if (baseline != null && baseline.SampleCount > 0 && baseline.EffectiveStdDev > 0)
+ {
+ var upper = baseline.Mean + 2 * baseline.EffectiveStdDev;
+ var lower = Math.Max(0, baseline.Mean - 2 * baseline.EffectiveStdDev);
+
+ _crosshairManager?.SetLaneBaseline(chart, lower, upper, minAnomalyValue);
+
+ var band = chart.Plot.Add.HorizontalSpan(lower, upper);
+ band.FillStyle.Color = ScottPlot.Color.FromHex(colorHex).WithAlpha(25);
+ band.LineStyle.Width = 0;
+
+ var meanLine = chart.Plot.Add.HorizontalLine(baseline.Mean);
+ meanLine.Color = ScottPlot.Color.FromHex(colorHex).WithAlpha(60);
+ meanLine.LinePattern = ScottPlot.LinePattern.Dashed;
+ meanLine.LineWidth = 1;
+
+ // Highlight anomalous points (outside ± 2σ band AND above absolute minimum)
+ var anomalyIndices = new List();
+ for (int i = 0; i < values.Length; i++)
+ {
+ if ((values[i] > upper && values[i] >= minAnomalyValue) || values[i] < lower)
+ anomalyIndices.Add(i);
+ }
+
+ if (anomalyIndices.Count > 0)
+ {
+ var anomalyTimes = anomalyIndices.Select(i => times[i]).ToArray();
+ var anomalyValues = anomalyIndices.Select(i => values[i]).ToArray();
+ var anomalyScatter = chart.Plot.Add.Scatter(anomalyTimes, anomalyValues);
+ anomalyScatter.Color = ScottPlot.Color.FromHex("#FF5252");
+ anomalyScatter.MarkerSize = 6;
+ anomalyScatter.MarkerShape = ScottPlot.MarkerShape.FilledCircle;
+ anomalyScatter.LineWidth = 0;
+ }
+ }
+
+ // The metric line itself, drawn on top of band/anomaly markers.
+ var scatter = chart.Plot.Add.Scatter(times, values);
+ scatter.Color = ScottPlot.Color.FromHex(colorHex);
+ scatter.MarkerSize = 0;
+ scatter.LineWidth = 1.5f;
+ scatter.LegendText = title;
+ scatter.ConnectStyle = ScottPlot.ConnectStyle.Straight;
+
+ _crosshairManager?.SetLaneData(chart, times, values);
+
+ chart.Plot.Axes.DateTimeTicksBottom();
+ // Hide bottom tick labels on all lanes except the last (File I/O)
+ if (chart != FileIoChart)
+ chart.Plot.Axes.Bottom.TickLabelStyle.IsVisible = false;
+
+ ReapplyAxisColors(chart);
+
+ // Compact layout: hide Y label, minimize title, no legend
+ chart.Plot.Title("");
+ chart.Plot.YLabel("");
+ chart.Plot.Legend.IsVisible = false;
+ chart.Plot.Axes.Margins(bottom: 0);
+
+ if (yMin.HasValue && yMax.HasValue)
+ chart.Plot.Axes.SetLimitsY(yMin.Value, yMax.Value);
+ else
+ {
+ // Auto-range with 10% padding (minimum 1 unit) and a floor of 0.
+ var maxVal = data.Max(d => d.Value);
+ var minVal = data.Min(d => d.Value);
+ var padding = Math.Max((maxVal - minVal) * 0.1, 1);
+ chart.Plot.Axes.SetLimitsY(Math.Max(0, minVal - padding), maxVal + padding);
+ }
+
+ chart.Refresh();
+ }
+
+ /// <summary>
+ /// Applies identical X-axis limits to every lane so the crosshair and data align.
+ /// An explicit from/to pair wins; otherwise the window trails "now" (shifted
+ /// into display time by the UTC offset) by <paramref name="hoursBack"/> hours.
+ /// </summary>
+ private void SyncXAxes(int hoursBack, DateTime? fromDate, DateTime? toDate, double utcOffset)
+ {
+ DateTime rangeStart, rangeEnd;
+ if (fromDate.HasValue && toDate.HasValue)
+ {
+ rangeStart = fromDate.Value;
+ rangeEnd = toDate.Value;
+ }
+ else
+ {
+ rangeEnd = DateTime.UtcNow.AddMinutes(utcOffset);
+ rangeStart = rangeEnd.AddHours(-hoursBack);
+ }
+
+ double left = rangeStart.ToOADate();
+ double right = rangeEnd.ToOADate();
+
+ foreach (var lane in new[] { CpuChart, WaitStatsChart, BlockingChart, MemoryChart, FileIoChart })
+ {
+ lane.Plot.Axes.SetLimitsX(left, right);
+ lane.Refresh();
+ }
+ }
+
+ /// <summary>
+ /// Renders a semi-transparent dashed ghost line for the comparison overlay.
+ /// NOTE(review): colorHex is currently unused — the line is deliberately white-ish
+ /// to stay distinct from the primary colored series; confirm the parameter is kept
+ /// for future per-lane tinting before removing it.
+ /// </summary>
+ private static void AddGhostLine(ScottPlot.WPF.WpfPlot chart,
+ List<(double Time, double Value)> data, string colorHex)
+ {
+ if (data.Count == 0) return;
+
+ var times = data.Select(d => d.Time).ToArray();
+ var values = data.Select(d => d.Value).ToArray();
+
+ var scatter = chart.Plot.Add.Scatter(times, values);
+ // White-ish ghost line — distinct from the primary colored line
+ scatter.Color = ScottPlot.Colors.White.WithAlpha(140);
+ scatter.MarkerSize = 0;
+ scatter.LineWidth = 1.5f;
+ scatter.LinePattern = ScottPlot.LinePattern.Dashed;
+
+ chart.Refresh();
+ }
+
+ /// <summary>
+ /// Builds a human-readable label for how far back the comparison range sits
+ /// relative to the start of the current window: "yesterday", "last week",
+ /// or "Nd ago" for anything else.
+ /// </summary>
+ private static string ComparisonLabel((DateTime From, DateTime To) range,
+ DateTime? fromDate, int hoursBack)
+ {
+ var windowStart = fromDate ?? DateTime.UtcNow.AddHours(-hoursBack);
+ var offsetDays = (windowStart - range.From).TotalDays;
+
+ // Half-day tolerance keeps DST shifts and minor clock skew from breaking the match.
+ return Math.Abs(offsetDays - 1) < 0.5 ? "yesterday"
+ : Math.Abs(offsetDays - 7) < 0.5 ? "last week"
+ : $"{offsetDays:N0}d ago";
+ }
+
+ /// <summary>Fully resets a lane chart: Reset() restores default settings, Clear() drops plottables.</summary>
+ private static void ClearChart(ScottPlot.WPF.WpfPlot chart)
+ {
+ chart.Reset();
+ chart.Plot.Clear();
+ }
+
+ /// <summary>
+ /// Renders a centered "{title} / No Data" placeholder on an empty lane,
+ /// hiding the grid, ticks, and legend.
+ /// </summary>
+ private static void ShowEmpty(ScottPlot.WPF.WpfPlot chart, string title)
+ {
+ ReapplyAxisColors(chart);
+ var text = chart.Plot.Add.Text($"{title}\nNo Data", 0, 0);
+ text.LabelFontColor = ScottPlot.Color.FromHex("#888888");
+ text.LabelFontSize = 12;
+ text.LabelAlignment = ScottPlot.Alignment.MiddleCenter;
+ chart.Plot.HideGrid();
+ // Fixed unit axes center the placeholder text at the origin.
+ chart.Plot.Axes.SetLimitsX(-1, 1);
+ chart.Plot.Axes.SetLimitsY(-1, 1);
+ chart.Plot.Axes.Bottom.TickGenerator = new ScottPlot.TickGenerators.EmptyTickGenerator();
+ chart.Plot.Axes.Left.TickGenerator = new ScottPlot.TickGenerators.EmptyTickGenerator();
+ chart.Plot.Legend.IsVisible = false;
+ chart.Refresh();
+ }
+
+ /// <summary>
+ /// Reapplies the current theme to all lane charts (call on theme change).
+ /// </summary>
+ public void ReapplyTheme()
+ {
+ var charts = new[] { CpuChart, WaitStatsChart, BlockingChart, MemoryChart, FileIoChart };
+ foreach (var chart in charts)
+ {
+ ApplyTheme(chart);
+ chart.Refresh();
+ }
+ }
+
+ /// <summary>
+ /// Applies the active application theme (figure/data backgrounds, text, and grid
+ /// colors) to one lane chart, including the hosting WPF control's background brush.
+ /// </summary>
+ private static void ApplyTheme(ScottPlot.WPF.WpfPlot chart)
+ {
+ // Resolve the palette once as a tuple: CoolBreeze > any light theme > dark default.
+ var (figureBackground, dataBackground, textColor, gridColor) =
+ ThemeManager.CurrentTheme == "CoolBreeze"
+ ? (ScottPlot.Color.FromHex("#EEF4FA"),
+ ScottPlot.Color.FromHex("#DAE6F0"),
+ ScottPlot.Color.FromHex("#1A2A3A"),
+ ScottPlot.Color.FromHex("#A8BDD0").WithAlpha(120))
+ : ThemeManager.HasLightBackground
+ ? (ScottPlot.Color.FromHex("#FFFFFF"),
+ ScottPlot.Color.FromHex("#F5F7FA"),
+ ScottPlot.Color.FromHex("#1A1D23"),
+ ScottPlot.Colors.Black.WithAlpha(20))
+ : (ScottPlot.Color.FromHex("#22252b"),
+ ScottPlot.Color.FromHex("#111217"),
+ ScottPlot.Color.FromHex("#E4E6EB"),
+ ScottPlot.Colors.White.WithAlpha(40));
+
+ chart.Plot.FigureBackground.Color = figureBackground;
+ chart.Plot.DataBackground.Color = dataBackground;
+ chart.Plot.Axes.Color(textColor);
+ chart.Plot.Grid.MajorLineColor = gridColor;
+ chart.Plot.Legend.IsVisible = false;
+ chart.Plot.Axes.Margins(bottom: 0);
+ chart.Plot.Axes.Bottom.TickLabelStyle.ForeColor = textColor;
+ chart.Plot.Axes.Left.TickLabelStyle.ForeColor = textColor;
+ chart.Plot.Axes.Bottom.Label.ForeColor = textColor;
+ chart.Plot.Axes.Left.Label.ForeColor = textColor;
+
+ // Match the WPF control's own background so no white border shows around the plot.
+ chart.Background = new System.Windows.Media.SolidColorBrush(
+ System.Windows.Media.Color.FromRgb(figureBackground.R, figureBackground.G, figureBackground.B));
+ }
+
+ /// <summary>
+ /// Restores theme-appropriate tick and axis-label colors; used after calls
+ /// (e.g. DateTimeTicksBottom) that reset axis styling.
+ /// </summary>
+ private static void ReapplyAxisColors(ScottPlot.WPF.WpfPlot chart)
+ {
+ ScottPlot.Color textColor;
+ if (ThemeManager.CurrentTheme == "CoolBreeze")
+ textColor = ScottPlot.Color.FromHex("#1A2A3A");
+ else if (ThemeManager.HasLightBackground)
+ textColor = ScottPlot.Color.FromHex("#1A1D23");
+ else
+ textColor = ScottPlot.Color.FromHex("#E4E6EB");
+
+ chart.Plot.Axes.Bottom.TickLabelStyle.ForeColor = textColor;
+ chart.Plot.Axes.Left.TickLabelStyle.ForeColor = textColor;
+ chart.Plot.Axes.Bottom.Label.ForeColor = textColor;
+ chart.Plot.Axes.Left.Label.ForeColor = textColor;
+ }
+}
diff --git a/Lite/Controls/FinOpsTab.xaml.cs b/Lite/Controls/FinOpsTab.xaml.cs
index 67505af..ca5eda6 100644
--- a/Lite/Controls/FinOpsTab.xaml.cs
+++ b/Lite/Controls/FinOpsTab.xaml.cs
@@ -18,6 +18,7 @@
using System.Windows.Media;
using Microsoft.Win32;
using PerformanceMonitorLite.Models;
+using PerformanceMonitorLite.Helpers;
using PerformanceMonitorLite.Services;
namespace PerformanceMonitorLite.Controls;
@@ -884,7 +885,7 @@ private void CopyAllRows_Click(object sender, RoutedEventArgs e)
foreach (var col in grid.Columns)
{
- sb.Append(col.Header?.ToString() ?? "");
+ sb.Append(DataGridClipboardBehavior.GetHeaderText(col));
sb.Append('\t');
}
sb.AppendLine();
@@ -930,7 +931,7 @@ private void ExportToCsv_Click(object sender, RoutedEventArgs e)
var headers = new List();
foreach (var col in grid.Columns)
- headers.Add(CsvEscape(col.Header?.ToString() ?? ""));
+ headers.Add(CsvEscape(DataGridClipboardBehavior.GetHeaderText(col)));
sb.AppendLine(string.Join(",", headers));
foreach (var item in grid.Items)
diff --git a/Lite/Controls/PlanViewerControl.xaml.cs b/Lite/Controls/PlanViewerControl.xaml.cs
index fc40d6a..fb4af2e 100644
--- a/Lite/Controls/PlanViewerControl.xaml.cs
+++ b/Lite/Controls/PlanViewerControl.xaml.cs
@@ -295,6 +295,27 @@ private Border CreateNodeVisual(PlanNode node, int totalWarningCount = -1)
iconRow.Children.Add(parBadge);
}
+ // Nonclustered index count badge (modification operators maintaining multiple NC indexes)
+ if (node.NonClusteredIndexCount > 0)
+ {
+ var ncBadge = new Border
+ {
+ Background = new SolidColorBrush(Color.FromRgb(0x6C, 0x75, 0x7D)),
+ CornerRadius = new CornerRadius(4),
+ Padding = new Thickness(4, 1, 4, 1),
+ Margin = new Thickness(4, 0, 0, 0),
+ VerticalAlignment = VerticalAlignment.Center,
+ Child = new TextBlock
+ {
+ Text = $"+{node.NonClusteredIndexCount} NC",
+ FontSize = 10,
+ FontWeight = FontWeights.SemiBold,
+ Foreground = Brushes.White
+ }
+ };
+ iconRow.Children.Add(ncBadge);
+ }
+
stack.Children.Add(iconRow);
// Operator name — use full name, let TextTrimming handle overflow
@@ -710,7 +731,7 @@ private void ShowPropertiesPanel(PlanNode node)
|| node.SortDistinct || node.StartupExpression
|| node.NLOptimized || node.WithOrderedPrefetch || node.WithUnorderedPrefetch
|| node.WithTies || node.Remoting || node.LocalParallelism
- || node.SpoolStack || node.DMLRequestSort
+ || node.SpoolStack || node.DMLRequestSort || node.NonClusteredIndexCount > 0
|| !string.IsNullOrEmpty(node.OffsetExpression) || node.TopRows > 0
|| !string.IsNullOrEmpty(node.ConstantScanValues)
|| !string.IsNullOrEmpty(node.UdxUsedColumns);
@@ -759,6 +780,12 @@ private void ShowPropertiesPanel(PlanNode node)
AddPropertyRow("Primary Node Id", $"{node.PrimaryNodeId}");
if (node.DMLRequestSort)
AddPropertyRow("DML Request Sort", "True");
+ if (node.NonClusteredIndexCount > 0)
+ {
+ AddPropertyRow("NC Indexes Maintained", $"{node.NonClusteredIndexCount}");
+ foreach (var ixName in node.NonClusteredIndexNames)
+ AddPropertyRow("", ixName, isCode: true);
+ }
if (!string.IsNullOrEmpty(node.ActionColumn))
AddPropertyRow("Action Column", node.ActionColumn, isCode: true);
if (!string.IsNullOrEmpty(node.SegmentColumn))
@@ -1664,6 +1691,10 @@ private ToolTip BuildNodeTooltip(PlanNode node, List? allWarnings =
AddTooltipRow(stack, "Scan Direction", node.ScanDirection);
}
+ // NC index maintenance count
+ if (node.NonClusteredIndexCount > 0)
+ AddTooltipRow(stack, "NC Indexes Maintained", string.Join(", ", node.NonClusteredIndexNames));
+
// Operator details (key items only in tooltip)
var hasTooltipDetails = !string.IsNullOrEmpty(node.OrderBy)
|| !string.IsNullOrEmpty(node.TopExpression)
diff --git a/Lite/Controls/ServerTab.xaml b/Lite/Controls/ServerTab.xaml
index daa2305..c04d23d 100644
--- a/Lite/Controls/ServerTab.xaml
+++ b/Lite/Controls/ServerTab.xaml
@@ -103,6 +103,16 @@
SelectionChanged="CustomTimeCombo_Changed" ToolTip="Minute"/>
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
@@ -359,11 +348,16 @@
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -759,7 +1014,7 @@
diff --git a/Lite/Controls/ServerTab.xaml.cs b/Lite/Controls/ServerTab.xaml.cs
index f3261a5..4370176 100644
--- a/Lite/Controls/ServerTab.xaml.cs
+++ b/Lite/Controls/ServerTab.xaml.cs
@@ -46,10 +46,6 @@ public partial class ServerTab : UserControl
private List _perfmonCounterItems = new();
private Helpers.ChartHoverHelper? _waitStatsHover;
private Helpers.ChartHoverHelper? _perfmonHover;
- private Helpers.ChartHoverHelper? _overviewCpuHover;
- private Helpers.ChartHoverHelper? _overviewMemoryHover;
- private Helpers.ChartHoverHelper? _overviewFileIoHover;
- private Helpers.ChartHoverHelper? _overviewWaitStatsHover;
private Helpers.ChartHoverHelper? _cpuHover;
private Helpers.ChartHoverHelper? _memoryHover;
private Helpers.ChartHoverHelper? _tempDbHover;
@@ -203,10 +199,6 @@ public ServerTab(ServerConnection server, DuckDbInitializer duckDb, CredentialSe
}
/* Apply theme immediately so charts don't flash white before data loads */
- ApplyTheme(OverviewCpuChart);
- ApplyTheme(OverviewMemoryChart);
- ApplyTheme(OverviewFileIoChart);
- ApplyTheme(OverviewWaitStatsChart);
ApplyTheme(WaitStatsChart);
ApplyTheme(QueryDurationTrendChart);
ApplyTheme(ProcDurationTrendChart);
@@ -233,10 +225,7 @@ public ServerTab(ServerConnection server, DuckDbInitializer duckDb, CredentialSe
ApplyTheme(QueryHeatmapChart);
/* Chart hover tooltips */
- _overviewCpuHover = new Helpers.ChartHoverHelper(OverviewCpuChart, "%");
- _overviewMemoryHover = new Helpers.ChartHoverHelper(OverviewMemoryChart, "MB");
- _overviewFileIoHover = new Helpers.ChartHoverHelper(OverviewFileIoChart, "ms");
- _overviewWaitStatsHover = new Helpers.ChartHoverHelper(OverviewWaitStatsChart, "ms/sec");
+ CorrelatedLanes.Initialize(_dataService, _serverId);
_waitStatsHover = new Helpers.ChartHoverHelper(WaitStatsChart, "ms/sec");
_perfmonHover = new Helpers.ChartHoverHelper(PerfmonChart, "");
_cpuHover = new Helpers.ChartHoverHelper(CpuChart, "%");
@@ -325,16 +314,6 @@ public ServerTab(ServerConnection server, DuckDbInitializer duckDb, CredentialSe
Helpers.ContextMenuHelper.SetupChartContextMenu(ProcDurationTrendChart, "Procedure_Duration_Trends");
Helpers.ContextMenuHelper.SetupChartContextMenu(QueryStoreDurationTrendChart, "QueryStore_Duration_Trends");
Helpers.ContextMenuHelper.SetupChartContextMenu(ExecutionCountTrendChart, "Execution_Count_Trends");
- /* Overview chart context menus */
- var ovCpuMenu = Helpers.ContextMenuHelper.SetupChartContextMenu(OverviewCpuChart, "Overview_CPU");
- AddChartDrillDownMenuItem(OverviewCpuChart, ovCpuMenu, _overviewCpuHover, "Show Active Queries at This Time", OnCpuDrillDown);
- var ovMemMenu = Helpers.ContextMenuHelper.SetupChartContextMenu(OverviewMemoryChart, "Overview_Memory");
- AddChartDrillDownMenuItem(OverviewMemoryChart, ovMemMenu, _overviewMemoryHover, "Show Active Queries at This Time", OnMemoryDrillDown);
- var ovIoMenu = Helpers.ContextMenuHelper.SetupChartContextMenu(OverviewFileIoChart, "Overview_FileIO");
- AddChartDrillDownMenuItem(OverviewFileIoChart, ovIoMenu, _overviewFileIoHover, "Show Active Queries at This Time", OnCpuDrillDown);
- var ovWaitMenu = Helpers.ContextMenuHelper.SetupChartContextMenu(OverviewWaitStatsChart, "Overview_WaitStats");
- AddChartDrillDownMenuItem(OverviewWaitStatsChart, ovWaitMenu, _overviewWaitStatsHover, "Show Active Queries at This Time", OnCpuDrillDown);
-
var cpuMenu = Helpers.ContextMenuHelper.SetupChartContextMenu(CpuChart, "CPU_Usage");
AddChartDrillDownMenuItem(CpuChart, cpuMenu, _cpuHover, "Show Active Queries at This Time", OnCpuDrillDown);
var memoryMenu = Helpers.ContextMenuHelper.SetupChartContextMenu(MemoryChart, "Memory_Usage");
@@ -639,6 +618,74 @@ private async void TimeRangeCombo_SelectionChanged(object sender, SelectionChang
}
}
+ /// <summary>
+ /// Handles a change of the comparison dropdown: re-runs the overview refresh
+ /// (which picks up the new comparison range) and then the three comparison grids.
+ /// async void is acceptable here — this is a top-level WPF event handler.
+ /// </summary>
+ private async void CompareToCombo_SelectionChanged(object sender, SelectionChangedEventArgs e)
+ {
+ if (!IsLoaded || _isRefreshing) return;
+
+ var hoursBack = GetHoursBack();
+ DateTime? fromDate = null, toDate = null;
+ if (IsCustomRange)
+ {
+ // Convert picker (display-time) values into server time for the queries.
+ var fromLocal = GetDateTimeFromPickers(FromDatePicker!, FromHourCombo, FromMinuteCombo);
+ var toLocal = GetDateTimeFromPickers(ToDatePicker!, ToHourCombo, ToMinuteCombo);
+ if (fromLocal.HasValue && toLocal.HasValue)
+ {
+ fromDate = ServerTimeHelper.DisplayTimeToServerTime(fromLocal.Value, ServerTimeHelper.CurrentDisplayMode);
+ toDate = ServerTimeHelper.DisplayTimeToServerTime(toLocal.Value, ServerTimeHelper.CurrentDisplayMode);
+ }
+ }
+
+ await RefreshOverviewAsync(hoursBack, fromDate, toDate);
+
+ // Also refresh comparison grids
+ try
+ {
+ var currentEnd = toDate ?? DateTime.UtcNow;
+ var currentStart = fromDate ?? currentEnd.AddHours(-hoursBack);
+ await RefreshQueryStatsComparisonAsync(currentStart, currentEnd);
+ await RefreshProcStatsComparisonAsync(currentStart, currentEnd);
+ await RefreshQueryStoreComparisonAsync(currentStart, currentEnd);
+ }
+ catch (Exception ex)
+ {
+ // Best-effort: grid comparisons failing should not break the overview refresh.
+ AppLogger.Info("ServerTab", $"[{_server.DisplayName}] Comparison refresh failed: {ex.Message}");
+ }
+ }
+
+ /// <summary>
+ /// Computes the reference time range for the comparison overlay based on the
+ /// current Compare dropdown selection and the active time range.
+ /// Returns null if "None" is selected.
+ /// NOTE(review): indices 2 ("Last week") and 3 ("Same day last week") currently
+ /// produce the identical range (AddDays(-7)) — confirm whether index 2 was meant
+ /// to cover a different window.
+ /// </summary>
+ private (DateTime From, DateTime To)? GetComparisonRange()
+ {
+ if (CompareToCombo == null || CompareToCombo.SelectedIndex <= 0) return null;
+
+ var hoursBack = GetHoursBack();
+ DateTime? fromDate = null, toDate = null;
+ if (IsCustomRange)
+ {
+ // Convert picker (display-time) values into server time, mirroring the refresh path.
+ var fromLocal = GetDateTimeFromPickers(FromDatePicker!, FromHourCombo, FromMinuteCombo);
+ var toLocal = GetDateTimeFromPickers(ToDatePicker!, ToHourCombo, ToMinuteCombo);
+ if (fromLocal.HasValue && toLocal.HasValue)
+ {
+ fromDate = ServerTimeHelper.DisplayTimeToServerTime(fromLocal.Value, ServerTimeHelper.CurrentDisplayMode);
+ toDate = ServerTimeHelper.DisplayTimeToServerTime(toLocal.Value, ServerTimeHelper.CurrentDisplayMode);
+ }
+ }
+
+ var currentEnd = toDate ?? DateTime.UtcNow;
+ var currentStart = fromDate ?? currentEnd.AddHours(-hoursBack);
+
+ return CompareToCombo.SelectedIndex switch
+ {
+ 1 => (currentStart.AddDays(-1), currentEnd.AddDays(-1)), // Yesterday
+ 2 => (currentStart.AddDays(-7), currentEnd.AddDays(-7)), // Last week
+ 3 => (currentStart.AddDays(-7), currentEnd.AddDays(-7)), // Same day last week
+ _ => null
+ };
+ }
+
private async void CustomDateRange_Changed(object sender, SelectionChangedEventArgs e)
{
if (!IsLoaded || _isRefreshing) return;
@@ -927,12 +974,27 @@ await System.Threading.Tasks.Task.WhenAll(
LiveSnapshotIndicator.Text = "";
_queryStatsFilterMgr!.UpdateData(queryStatsTask.Result);
SetDefaultSortIfNone(QueryStatsGrid, "TotalElapsedMs", ListSortDirection.Descending);
+ {
+ var cEnd = toDate ?? DateTime.UtcNow;
+ var cStart = fromDate ?? cEnd.AddHours(-hoursBack);
+ await RefreshQueryStatsComparisonAsync(cStart, cEnd);
+ }
_procStatsFilterMgr!.UpdateData(procStatsTask.Result);
SetDefaultSortIfNone(ProcedureStatsGrid, "TotalElapsedMs", ListSortDirection.Descending);
+ {
+ var cEnd2 = toDate ?? DateTime.UtcNow;
+ var cStart2 = fromDate ?? cEnd2.AddHours(-hoursBack);
+ await RefreshProcStatsComparisonAsync(cStart2, cEnd2);
+ }
_blockedProcessFilterMgr!.UpdateData(blockedProcessTask.Result);
_deadlockFilterMgr!.UpdateData(DeadlockProcessDetail.ParseFromRows(deadlockTask.Result));
_queryStoreFilterMgr!.UpdateData(queryStoreTask.Result);
SetDefaultSortIfNone(QueryStoreGrid, "TotalDurationMs", ListSortDirection.Descending);
+ {
+ var cEnd3 = toDate ?? DateTime.UtcNow;
+ var cStart3 = fromDate ?? cEnd3.AddHours(-hoursBack);
+ await RefreshQueryStoreComparisonAsync(cStart3, cEnd3);
+ }
_serverConfigFilterMgr!.UpdateData(serverConfigTask.Result);
_databaseConfigFilterMgr!.UpdateData(databaseConfigTask.Result);
_dbScopedConfigFilterMgr!.UpdateData(databaseScopedConfigTask.Result);
@@ -1043,18 +1105,33 @@ private async System.Threading.Tasks.Task RefreshQueriesAsync(int hoursBack, Dat
_queryStatsFilterMgr!.UpdateData(queryStats);
SetDefaultSortIfNone(QueryStatsGrid, "TotalElapsedMs", ListSortDirection.Descending);
_ = LoadQueryStatsSlicerAsync();
+ {
+ var cEnd = toDate ?? DateTime.UtcNow;
+ var cStart = fromDate ?? cEnd.AddHours(-hoursBack);
+ await RefreshQueryStatsComparisonAsync(cStart, cEnd);
+ }
break;
case 3: // Top Procedures by Duration
var procStats = await _dataService.GetTopProceduresByCpuAsync(_serverId, hoursBack, 50, fromDate, toDate, UtcOffsetMinutes);
_procStatsFilterMgr!.UpdateData(procStats);
SetDefaultSortIfNone(ProcedureStatsGrid, "TotalElapsedMs", ListSortDirection.Descending);
_ = LoadProcStatsSlicerAsync();
+ {
+ var cEnd = toDate ?? DateTime.UtcNow;
+ var cStart = fromDate ?? cEnd.AddHours(-hoursBack);
+ await RefreshProcStatsComparisonAsync(cStart, cEnd);
+ }
break;
case 4: // Query Store by Duration
var qsData = await _dataService.GetQueryStoreTopQueriesAsync(_serverId, hoursBack, 50, fromDate, toDate);
_queryStoreFilterMgr!.UpdateData(qsData);
SetDefaultSortIfNone(QueryStoreGrid, "TotalDurationMs", ListSortDirection.Descending);
_ = LoadQueryStoreSlicerAsync();
+ {
+ var cEnd = toDate ?? DateTime.UtcNow;
+ var cStart = fromDate ?? cEnd.AddHours(-hoursBack);
+ await RefreshQueryStoreComparisonAsync(cStart, cEnd);
+ }
break;
case 5: // Query Heatmap
var hmMetric = (HeatmapMetric)HeatmapMetricCombo.SelectedIndex;
@@ -1094,12 +1171,27 @@ await System.Threading.Tasks.Task.WhenAll(
_queryStatsFilterMgr!.UpdateData(queryStatsTask.Result);
SetDefaultSortIfNone(QueryStatsGrid, "TotalElapsedMs", ListSortDirection.Descending);
_ = LoadQueryStatsSlicerAsync();
+ {
+ var cEnd = toDate ?? DateTime.UtcNow;
+ var cStart = fromDate ?? cEnd.AddHours(-hoursBack);
+ await RefreshQueryStatsComparisonAsync(cStart, cEnd);
+ }
_procStatsFilterMgr!.UpdateData(procStatsTask.Result);
SetDefaultSortIfNone(ProcedureStatsGrid, "TotalElapsedMs", ListSortDirection.Descending);
_ = LoadProcStatsSlicerAsync();
+ {
+ var cEnd2 = toDate ?? DateTime.UtcNow;
+ var cStart2 = fromDate ?? cEnd2.AddHours(-hoursBack);
+ await RefreshProcStatsComparisonAsync(cStart2, cEnd2);
+ }
_queryStoreFilterMgr!.UpdateData(queryStoreTask.Result);
SetDefaultSortIfNone(QueryStoreGrid, "TotalDurationMs", ListSortDirection.Descending);
_ = LoadQueryStoreSlicerAsync();
+ {
+ var cEnd3 = toDate ?? DateTime.UtcNow;
+ var cStart3 = fromDate ?? cEnd3.AddHours(-hoursBack);
+ await RefreshQueryStoreComparisonAsync(cStart3, cEnd3);
+ }
UpdateQueryDurationTrendChart(queryDurationTrendTask.Result);
UpdateProcDurationTrendChart(procDurationTrendTask.Result);
@@ -1113,157 +1205,133 @@ await System.Threading.Tasks.Task.WhenAll(
}
}
- /// Tab 3 — CPU
- /// Tab 0 — Overview (4 charts: CPU, Memory, File I/O, Wait Stats)
- private async System.Threading.Tasks.Task RefreshOverviewAsync(int hoursBack, DateTime? fromDate, DateTime? toDate)
- {
- try
- {
- var cpuTask = SafeQueryAsync(() => _dataService.GetCpuUtilizationAsync(_serverId, hoursBack, fromDate, toDate));
- var memoryTask = SafeQueryAsync(() => _dataService.GetMemoryTrendAsync(_serverId, hoursBack, fromDate, toDate));
- var fileIoTask = SafeQueryAsync(() => _dataService.GetFileIoLatencyTrendAsync(_serverId, hoursBack, fromDate, toDate));
+ private bool IsQueryStatsComparisonActive => GetComparisonRange() != null;
- // Get top 5 wait types then fetch trends for each
- var waitStats = await SafeQueryAsync(() => _dataService.GetWaitStatsAsync(_serverId, hoursBack, fromDate, toDate));
- var topWaits = waitStats.Take(5).Select(w => w.WaitType).ToList();
- await System.Threading.Tasks.Task.WhenAll(cpuTask, memoryTask, fileIoTask);
+ private void SetQueryStatsComparisonMode(bool active, (DateTime From, DateTime To)? baselineRange = null)
+ {
+ QueryStatsGrid.Visibility = active ? System.Windows.Visibility.Collapsed : System.Windows.Visibility.Visible;
+ QueryStatsComparisonGrid.Visibility = active ? System.Windows.Visibility.Visible : System.Windows.Visibility.Collapsed;
+ QueryStatsComparisonBanner.Visibility = active ? System.Windows.Visibility.Visible : System.Windows.Visibility.Collapsed;
- UpdateOverviewCpuChart(cpuTask.Result);
- UpdateOverviewMemoryChart(memoryTask.Result);
- UpdateOverviewFileIoChart(fileIoTask.Result);
- await UpdateOverviewWaitStatsChartAsync(topWaits, hoursBack, fromDate, toDate);
- }
- catch (Exception ex)
+ if (active && baselineRange.HasValue)
{
- AppLogger.Info("ServerTab", $"[{_server.DisplayName}] RefreshOverviewAsync failed: {ex.Message}");
+ var from = ServerTimeHelper.FormatServerTime(baselineRange.Value.From);
+ var to = ServerTimeHelper.FormatServerTime(baselineRange.Value.To);
+ QueryStatsComparisonBanner.Text = $"Comparing against baseline: {from} \u2192 {to}";
}
}
- private void UpdateOverviewCpuChart(List data)
+ private async System.Threading.Tasks.Task RefreshQueryStatsComparisonAsync(DateTime currentStart, DateTime currentEnd)
{
- ClearChart(OverviewCpuChart);
- _overviewCpuHover?.Clear();
- ApplyTheme(OverviewCpuChart);
+ var baselineRange = GetComparisonRange();
+ if (baselineRange == null)
+ {
+ SetQueryStatsComparisonMode(false);
+ return;
+ }
- if (data.Count == 0) { RefreshEmptyChart(OverviewCpuChart, "CPU Utilization", "CPU %"); return; }
+ SetQueryStatsComparisonMode(true, baselineRange);
- var times = data.Select(d => d.SampleTime.ToOADate()).ToArray();
- var sqlCpu = data.Select(d => (double)d.SqlServerCpu).ToArray();
+ var items = await _dataService.GetQueryStatsComparisonAsync(
+ _serverId, currentStart, currentEnd,
+ baselineRange.Value.From, baselineRange.Value.To);
- var plot = OverviewCpuChart.Plot.Add.Scatter(times, sqlCpu);
- plot.LegendText = "SQL CPU %";
- plot.Color = ScottPlot.Color.FromHex("#4FC3F7");
- _overviewCpuHover?.Add(plot, "SQL CPU %");
+ // Sort: NEW first, then by duration delta descending, GONE last
+ var sorted = items
+ .OrderBy(x => x.SortGroup)
+ .ThenByDescending(x => x.SortableDurationDelta)
+ .ToList();
- OverviewCpuChart.Plot.Axes.DateTimeTicksBottom();
- ReapplyAxisColors(OverviewCpuChart);
- OverviewCpuChart.Plot.Title("CPU Utilization");
- OverviewCpuChart.Plot.YLabel("CPU %");
- OverviewCpuChart.Plot.Axes.SetLimitsY(0, 105);
- ShowChartLegend(OverviewCpuChart);
- OverviewCpuChart.Refresh();
+ QueryStatsComparisonGrid.ItemsSource = sorted;
}
- private void UpdateOverviewMemoryChart(List data)
+ private void SetProcStatsComparisonMode(bool active, (DateTime From, DateTime To)? baselineRange = null)
{
- ClearChart(OverviewMemoryChart);
- _overviewMemoryHover?.Clear();
- ApplyTheme(OverviewMemoryChart);
+ ProcedureStatsGrid.Visibility = active ? System.Windows.Visibility.Collapsed : System.Windows.Visibility.Visible;
+ ProcStatsComparisonGrid.Visibility = active ? System.Windows.Visibility.Visible : System.Windows.Visibility.Collapsed;
+ ProcStatsComparisonBanner.Visibility = active ? System.Windows.Visibility.Visible : System.Windows.Visibility.Collapsed;
- if (data.Count == 0) { RefreshEmptyChart(OverviewMemoryChart, "Memory Utilization", "MB"); return; }
+ if (active && baselineRange.HasValue)
+ {
+ var from = ServerTimeHelper.FormatServerTime(baselineRange.Value.From);
+ var to = ServerTimeHelper.FormatServerTime(baselineRange.Value.To);
+ ProcStatsComparisonBanner.Text = $"Comparing against baseline: {from} \u2192 {to}";
+ }
+ }
- var times = data.Select(d => d.CollectionTime.AddMinutes(UtcOffsetMinutes).ToOADate()).ToArray();
- var bufferPool = data.Select(d => d.BufferPoolMb).ToArray();
- var grants = data.Select(d => d.TotalGrantedMb).ToArray();
+ private async System.Threading.Tasks.Task RefreshProcStatsComparisonAsync(DateTime currentStart, DateTime currentEnd)
+ {
+ var baselineRange = GetComparisonRange();
+ if (baselineRange == null)
+ {
+ SetProcStatsComparisonMode(false);
+ return;
+ }
- var bpPlot = OverviewMemoryChart.Plot.Add.Scatter(times, bufferPool);
- bpPlot.LegendText = "Buffer Pool";
- bpPlot.Color = ScottPlot.Color.FromHex("#CE93D8");
- _overviewMemoryHover?.Add(bpPlot, "Buffer Pool");
+ SetProcStatsComparisonMode(true, baselineRange);
- var grantPlot = OverviewMemoryChart.Plot.Add.Scatter(times, grants);
- grantPlot.LegendText = "Memory Grants";
- grantPlot.Color = ScottPlot.Color.FromHex("#FFB74D");
- _overviewMemoryHover?.Add(grantPlot, "Memory Grants");
+ var items = await _dataService.GetProcedureStatsComparisonAsync(
+ _serverId, currentStart, currentEnd,
+ baselineRange.Value.From, baselineRange.Value.To);
+
+ var sorted = items
+ .OrderBy(x => x.SortGroup)
+ .ThenByDescending(x => x.SortableDurationDelta)
+ .ToList();
- OverviewMemoryChart.Plot.Axes.DateTimeTicksBottom();
- ReapplyAxisColors(OverviewMemoryChart);
- OverviewMemoryChart.Plot.Title("Memory Utilization");
- OverviewMemoryChart.Plot.YLabel("MB");
- SetChartYLimitsWithLegendPadding(OverviewMemoryChart, 0, bufferPool.Max());
- ShowChartLegend(OverviewMemoryChart);
- OverviewMemoryChart.Refresh();
+ ProcStatsComparisonGrid.ItemsSource = sorted;
}
- private void UpdateOverviewFileIoChart(List data)
+ private void SetQueryStoreComparisonMode(bool active, (DateTime From, DateTime To)? baselineRange = null)
{
- ClearChart(OverviewFileIoChart);
- _overviewFileIoHover?.Clear();
- ApplyTheme(OverviewFileIoChart);
+ QueryStoreGrid.Visibility = active ? System.Windows.Visibility.Collapsed : System.Windows.Visibility.Visible;
+ QueryStoreComparisonGrid.Visibility = active ? System.Windows.Visibility.Visible : System.Windows.Visibility.Collapsed;
+ QueryStoreComparisonBanner.Visibility = active ? System.Windows.Visibility.Visible : System.Windows.Visibility.Collapsed;
- if (data.Count == 0) { RefreshEmptyChart(OverviewFileIoChart, "File I/O Latency", "ms"); return; }
+ if (active && baselineRange.HasValue)
+ {
+ var from = ServerTimeHelper.FormatServerTime(baselineRange.Value.From);
+ var to = ServerTimeHelper.FormatServerTime(baselineRange.Value.To);
+ QueryStoreComparisonBanner.Text = $"Comparing against baseline: {from} \u2192 {to}";
+ }
+ }
- // Aggregate across all databases/files per collection time
- var grouped = data
- .GroupBy(d => d.CollectionTime)
- .OrderBy(g => g.Key)
- .Select(g => new { Time = g.Key, ReadMs = g.Average(x => x.AvgReadLatencyMs), WriteMs = g.Average(x => x.AvgWriteLatencyMs) })
- .ToList();
+ private async System.Threading.Tasks.Task RefreshQueryStoreComparisonAsync(DateTime currentStart, DateTime currentEnd)
+ {
+ var baselineRange = GetComparisonRange();
+ if (baselineRange == null)
+ {
+ SetQueryStoreComparisonMode(false);
+ return;
+ }
- var times = grouped.Select(d => d.Time.AddMinutes(UtcOffsetMinutes).ToOADate()).ToArray();
- var readMs = grouped.Select(d => d.ReadMs).ToArray();
- var writeMs = grouped.Select(d => d.WriteMs).ToArray();
+ SetQueryStoreComparisonMode(true, baselineRange);
- var readPlot = OverviewFileIoChart.Plot.Add.Scatter(times, readMs);
- readPlot.LegendText = "Read ms";
- readPlot.Color = ScottPlot.Color.FromHex("#81C784");
- _overviewFileIoHover?.Add(readPlot, "Read ms");
+ var items = await _dataService.GetQueryStoreComparisonAsync(
+ _serverId, currentStart, currentEnd,
+ baselineRange.Value.From, baselineRange.Value.To);
- var writePlot = OverviewFileIoChart.Plot.Add.Scatter(times, writeMs);
- writePlot.LegendText = "Write ms";
- writePlot.Color = ScottPlot.Color.FromHex("#FFB74D");
- _overviewFileIoHover?.Add(writePlot, "Write ms");
+ var sorted = items
+ .OrderBy(x => x.SortGroup)
+ .ThenByDescending(x => x.SortableDurationDelta)
+ .ToList();
- OverviewFileIoChart.Plot.Axes.DateTimeTicksBottom();
- ReapplyAxisColors(OverviewFileIoChart);
- OverviewFileIoChart.Plot.Title("File I/O Latency");
- OverviewFileIoChart.Plot.YLabel("Latency (ms)");
- var maxVal = Math.Max(readMs.DefaultIfEmpty(0).Max(), writeMs.DefaultIfEmpty(0).Max());
- SetChartYLimitsWithLegendPadding(OverviewFileIoChart, 0, maxVal);
- ShowChartLegend(OverviewFileIoChart);
- OverviewFileIoChart.Refresh();
+ QueryStoreComparisonGrid.ItemsSource = sorted;
}
- private async System.Threading.Tasks.Task UpdateOverviewWaitStatsChartAsync(
- List topWaits, int hoursBack, DateTime? fromDate, DateTime? toDate)
+ /// Tab 3 — CPU
+ /// Tab 0 — Overview (Correlated Timeline Lanes)
+ private async System.Threading.Tasks.Task RefreshOverviewAsync(int hoursBack, DateTime? fromDate, DateTime? toDate)
{
- ClearChart(OverviewWaitStatsChart);
- _overviewWaitStatsHover?.Clear();
- ApplyTheme(OverviewWaitStatsChart);
-
- if (topWaits.Count == 0) { RefreshEmptyChart(OverviewWaitStatsChart, "Wait Statistics", "ms/sec"); return; }
-
- var colors = new[] { "#4FC3F7", "#81C784", "#FFB74D", "#CE93D8", "#E57373" };
- for (int i = 0; i < Math.Min(topWaits.Count, 5); i++)
+ try
{
- var trend = await _dataService.GetWaitStatsTrendAsync(_serverId, topWaits[i], hoursBack, fromDate, toDate);
- if (trend.Count < 2) continue;
-
- var times = trend.Select(d => d.CollectionTime.AddMinutes(UtcOffsetMinutes).ToOADate()).ToArray();
- var values = trend.Select(d => d.WaitTimeMsPerSecond).ToArray();
-
- var plot = OverviewWaitStatsChart.Plot.Add.Scatter(times, values);
- plot.LegendText = topWaits[i];
- plot.Color = ScottPlot.Color.FromHex(colors[i]);
- _overviewWaitStatsHover?.Add(plot, topWaits[i]);
+ var comparison = GetComparisonRange();
+ await CorrelatedLanes.RefreshAsync(hoursBack, fromDate, toDate, comparison);
+ }
+ catch (Exception ex)
+ {
+ AppLogger.Info("ServerTab", $"[{_server.DisplayName}] RefreshOverviewAsync failed: {ex.Message}");
}
-
- OverviewWaitStatsChart.Plot.Axes.DateTimeTicksBottom();
- ReapplyAxisColors(OverviewWaitStatsChart);
- OverviewWaitStatsChart.Plot.Title("Wait Statistics");
- OverviewWaitStatsChart.Plot.YLabel("Wait Time (ms/sec)");
- ShowChartLegend(OverviewWaitStatsChart);
- OverviewWaitStatsChart.Refresh();
}
private async System.Threading.Tasks.Task RefreshCpuAsync(int hoursBack, DateTime? fromDate, DateTime? toDate)
@@ -1696,6 +1764,8 @@ private async void MainTabControl_SelectionChanged(object sender, SelectionChang
if (e.Source != MainTabControl && e.Source != QueriesSubTabControl
&& e.Source != MemorySubTabControl && e.Source != BlockingSubTabControl) return;
+ UpdateCompareDropdownState();
+
var hoursBack = GetHoursBack();
DateTime? fromDate = null, toDate = null;
if (IsCustomRange)
@@ -1711,6 +1781,35 @@ private async void MainTabControl_SelectionChanged(object sender, SelectionChang
await RefreshVisibleTabAsync(hoursBack, fromDate, toDate, subTabOnly: true);
}
+ private bool IsComparisonSupportedOnCurrentTab()
+ {
+ return MainTabControl.SelectedIndex switch
+ {
+ 0 => true, // Overview — correlated timeline lanes
+ 2 => QueriesSubTabControl.SelectedIndex is 2 or 3 or 4, // Top Queries / Top Procedures / Query Store
+ _ => false
+ };
+ }
+
+ private void UpdateCompareDropdownState()
+ {
+ var supported = IsComparisonSupportedOnCurrentTab();
+
+ if (supported)
+ {
+ CompareToCombo.IsEnabled = true;
+ CompareToCombo.Opacity = 1.0;
+ CompareToCombo.ToolTip = "Compare current period against a baseline";
+ }
+ else
+ {
+ CompareToCombo.SelectedIndex = 0;
+ CompareToCombo.IsEnabled = false;
+ CompareToCombo.Opacity = 0.5;
+ CompareToCombo.ToolTip = "Comparison is not available for this tab";
+ }
+ }
+
///
/// Wraps a query in a try/catch so it returns an empty list on failure instead of faulting.
///
@@ -3779,7 +3878,7 @@ private static void ApplyTheme(ScottPlot.WPF.WpfPlot chart)
{
figureBackground = ScottPlot.Color.FromHex("#EEF4FA");
dataBackground = ScottPlot.Color.FromHex("#DAE6F0");
- textColor = ScottPlot.Color.FromHex("#364D61");
+ textColor = ScottPlot.Color.FromHex("#1A2A3A");
gridColor = ScottPlot.Color.FromHex("#A8BDD0").WithAlpha(120);
legendBg = ScottPlot.Color.FromHex("#EEF4FA");
legendFg = ScottPlot.Color.FromHex("#1A2A3A");
@@ -3789,7 +3888,7 @@ private static void ApplyTheme(ScottPlot.WPF.WpfPlot chart)
{
figureBackground = ScottPlot.Color.FromHex("#FFFFFF");
dataBackground = ScottPlot.Color.FromHex("#F5F7FA");
- textColor = ScottPlot.Color.FromHex("#4A5568");
+ textColor = ScottPlot.Color.FromHex("#1A1D23");
gridColor = ScottPlot.Colors.Black.WithAlpha(20);
legendBg = ScottPlot.Color.FromHex("#FFFFFF");
legendFg = ScottPlot.Color.FromHex("#1A1D23");
@@ -3799,7 +3898,7 @@ private static void ApplyTheme(ScottPlot.WPF.WpfPlot chart)
{
figureBackground = ScottPlot.Color.FromHex("#22252b");
dataBackground = ScottPlot.Color.FromHex("#111217");
- textColor = ScottPlot.Color.FromHex("#9DA5B4");
+ textColor = ScottPlot.Color.FromHex("#E4E6EB");
gridColor = ScottPlot.Colors.White.WithAlpha(40);
legendBg = ScottPlot.Color.FromHex("#22252b");
legendFg = ScottPlot.Color.FromHex("#E4E6EB");
@@ -3849,6 +3948,8 @@ private void OnThemeChanged(string _)
chart.Refresh();
}
}
+
+ CorrelatedLanes.ReapplyTheme();
}
private static IEnumerable GetAllCharts(DependencyObject root)
@@ -3868,10 +3969,10 @@ private void OnThemeChanged(string _)
private static void ReapplyAxisColors(ScottPlot.WPF.WpfPlot chart)
{
var textColor = Helpers.ThemeManager.CurrentTheme == "CoolBreeze"
- ? ScottPlot.Color.FromHex("#364D61")
+ ? ScottPlot.Color.FromHex("#1A2A3A")
: Helpers.ThemeManager.HasLightBackground
- ? ScottPlot.Color.FromHex("#4A5568")
- : ScottPlot.Color.FromHex("#9DA5B4");
+ ? ScottPlot.Color.FromHex("#1A1D23")
+ : ScottPlot.Color.FromHex("#E4E6EB");
chart.Plot.Axes.Bottom.TickLabelStyle.ForeColor = textColor;
chart.Plot.Axes.Left.TickLabelStyle.ForeColor = textColor;
chart.Plot.Axes.Bottom.Label.ForeColor = textColor;
@@ -4112,7 +4213,7 @@ private void ExportToCsv_Click(object sender, RoutedEventArgs e)
var headers = new List<string>();
foreach (var col in grid.Columns)
{
- headers.Add(CsvEscape(col.Header?.ToString() ?? "", sep));
+ headers.Add(CsvEscape(DataGridClipboardBehavior.GetHeaderText(col), sep));
}
sb.AppendLine(string.Join(sep, headers));
@@ -4784,6 +4885,7 @@ private async void OnQueryStatsSlicerChanged(object? sender, Controls.SlicerRang
var toServer = ServerTimeHelper.ToServerTime(e.EndUtc);
var queryStats = await _dataService.GetTopQueriesByCpuAsync(_serverId, 0, 50, fromServer, toServer, UtcOffsetMinutes);
_queryStatsFilterMgr!.UpdateData(queryStats);
+ await RefreshQueryStatsComparisonAsync(fromServer, toServer);
}
catch (Exception ex)
{
@@ -4882,6 +4984,7 @@ private async void OnQueryStoreSlicerChanged(object? sender, Controls.SlicerRang
var toServer = ServerTimeHelper.ToServerTime(e.EndUtc);
var qsData = await _dataService.GetQueryStoreTopQueriesAsync(_serverId, 0, 50, fromServer, toServer);
_queryStoreFilterMgr!.UpdateData(qsData);
+ await RefreshQueryStoreComparisonAsync(fromServer, toServer);
}
catch (Exception ex)
{
@@ -4978,6 +5081,7 @@ private async void OnProcStatsSlicerChanged(object? sender, Controls.SlicerRange
var toServer = ServerTimeHelper.ToServerTime(e.EndUtc);
var procStats = await _dataService.GetTopProceduresByCpuAsync(_serverId, 0, 50, fromServer, toServer, UtcOffsetMinutes);
_procStatsFilterMgr!.UpdateData(procStats);
+ await RefreshProcStatsComparisonAsync(fromServer, toServer);
}
catch (Exception ex)
{
@@ -5247,10 +5351,6 @@ public void DisposeChartHelpers()
{
_waitStatsHover?.Dispose();
_perfmonHover?.Dispose();
- _overviewCpuHover?.Dispose();
- _overviewMemoryHover?.Dispose();
- _overviewFileIoHover?.Dispose();
- _overviewWaitStatsHover?.Dispose();
_cpuHover?.Dispose();
_memoryHover?.Dispose();
_tempDbHover?.Dispose();
diff --git a/Lite/Controls/TimeRangeSlicerControl.xaml.cs b/Lite/Controls/TimeRangeSlicerControl.xaml.cs
index 99d3077..2ac759d 100644
--- a/Lite/Controls/TimeRangeSlicerControl.xaml.cs
+++ b/Lite/Controls/TimeRangeSlicerControl.xaml.cs
@@ -229,7 +229,7 @@ public void Redraw()
SlicerCanvas.Children.Add(new Path { Data = lineGeo, Stroke = lineBrush, StrokeThickness = 1.5 });
// X-axis labels — evenly spaced by TIME, skip if too close
- var labelBrush = FindBrush("SlicerLabelBrush", "#99E4E6EB");
+ var labelBrush = FindBrush("SlicerLabelBrush", "#E4E6EB");
const double minLabelSpacingPx = 90;
double lastLabelX = -minLabelSpacingPx;
int targetLabels = Math.Max(2, (int)(w / minLabelSpacingPx));
diff --git a/Lite/Helpers/CorrelatedCrosshairManager.cs b/Lite/Helpers/CorrelatedCrosshairManager.cs
new file mode 100644
index 0000000..75bce9b
--- /dev/null
+++ b/Lite/Helpers/CorrelatedCrosshairManager.cs
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2026 Erik Darling, Darling Data LLC
+ *
+ * This file is part of the SQL Server Performance Monitor Lite.
+ *
+ * Licensed under the MIT License. See LICENSE file in the project root for full license information.
+ *
+ * SYNC WARNING: Dashboard has a matching copy at Dashboard/Helpers/CorrelatedCrosshairManager.cs.
+ * Changes here must be mirrored there.
+ */
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Windows;
+using System.Windows.Controls;
+using System.Windows.Controls.Primitives;
+using System.Windows.Documents;
+using System.Windows.Input;
+using System.Windows.Media;
+using PerformanceMonitorLite.Services;
+
+namespace PerformanceMonitorLite.Helpers;
+
+/// <summary>
+/// Synchronizes vertical crosshair lines across multiple ScottPlot charts.
+/// When the user hovers over any lane, all lanes show a VLine at the same X (time)
+/// coordinate and value labels update to show each lane's value at that time.
+/// </summary>
+internal sealed class CorrelatedCrosshairManager : IDisposable
+{
+ private readonly List _lanes = new();
+ private readonly Popup _tooltip;
+ private readonly TextBlock _tooltipText;
+ private DateTime _lastUpdate;
+ private bool _isRefreshing;
+
+ public CorrelatedCrosshairManager()
+ {
+ _tooltipText = new TextBlock
+ {
+ Foreground = new SolidColorBrush(Color.FromRgb(0xE0, 0xE0, 0xE0)),
+ FontSize = 13
+ };
+
+ _tooltip = new Popup
+ {
+ Placement = PlacementMode.Relative,
+ IsHitTestVisible = false,
+ AllowsTransparency = true,
+ Child = new Border
+ {
+ Background = new SolidColorBrush(Color.FromRgb(0x33, 0x33, 0x33)),
+ BorderBrush = new SolidColorBrush(Color.FromRgb(0x55, 0x55, 0x55)),
+ BorderThickness = new Thickness(1),
+ CornerRadius = new CornerRadius(3),
+ Padding = new Thickness(8, 4, 8, 4),
+ Child = _tooltipText
+ }
+ };
+ }
+
+ /// <summary>
+ /// Registers a chart lane for crosshair synchronization.
+ /// </summary>
+ public void AddLane(ScottPlot.WPF.WpfPlot chart, string label, string unit)
+ {
+ var lane = new LaneInfo
+ {
+ Chart = chart,
+ Label = label,
+ Unit = unit
+ };
+
+ chart.MouseMove += (s, e) => OnMouseMove(lane, e);
+ chart.MouseLeave += (s, e) => OnMouseLeave();
+
+ _lanes.Add(lane);
+ }
+
+ /// <summary>
+ /// Sets the expected baseline range for a lane (upper/lower bounds).
+ /// Values outside this range get ▲/▼ indicators in the tooltip.
+ /// </summary>
+ public void SetLaneBaseline(ScottPlot.WPF.WpfPlot chart, double lower, double upper,
+ double minAnomalyValue = 0, bool isEventBased = false)
+ {
+ var lane = _lanes.Find(l => l.Chart == chart);
+ if (lane == null) return;
+ lane.BaselineLower = lower;
+ lane.BaselineUpper = upper;
+ lane.MinAnomalyValue = minAnomalyValue;
+ lane.IsEventBased = isEventBased;
+ }
+
+ /// <summary>
+ /// Sets a single data series for a lane (most lanes have one series).
+ /// </summary>
+ public void SetLaneData(ScottPlot.WPF.WpfPlot chart, double[] times, double[] values,
+ bool isEventBased = false)
+ {
+ var lane = _lanes.Find(l => l.Chart == chart);
+ if (lane == null) return;
+
+ lane.Series.Clear();
+ lane.Series.Add(new DataSeries
+ {
+ Name = lane.Label,
+ Times = times,
+ Values = values,
+ IsEventBased = isEventBased
+ });
+ }
+
+ /// <summary>
+ /// Adds a named data series to a lane (for lanes with multiple overlaid series).
+ /// Call SetLaneData first to clear, then AddLaneSeries for additional series.
+ /// </summary>
+ public void AddLaneSeries(ScottPlot.WPF.WpfPlot chart, string name, string unit,
+ double[] times, double[] values, bool isEventBased = false)
+ {
+ var lane = _lanes.Find(l => l.Chart == chart);
+ if (lane == null) return;
+
+ lane.Series.Add(new DataSeries
+ {
+ Name = name,
+ Unit = unit,
+ Times = times,
+ Values = values,
+ IsEventBased = isEventBased
+ });
+ }
+
+ /// <summary>
+ /// Sets the label shown in the tooltip for comparison data (e.g., "yesterday").
+ /// </summary>
+ public void SetComparisonLabel(string label)
+ {
+ _comparisonLabel = label;
+ }
+
+ private string? _comparisonLabel;
+
+ /// <summary>
+ /// Clears data and VLines. Call before re-populating charts.
+ /// </summary>
+ public void PrepareForRefresh()
+ {
+ _isRefreshing = true;
+ _tooltip.IsOpen = false;
+ _comparisonLabel = null;
+ foreach (var lane in _lanes)
+ {
+ lane.Series.Clear();
+ lane.VLine = null;
+ lane.BaselineUpper = null;
+ lane.BaselineLower = null;
+ lane.MinAnomalyValue = 0;
+ }
+ }
+
+ /// <summary>
+ /// Creates fresh VLine plottables on each lane's chart.
+ /// Must be called AFTER chart data is populated.
+ /// </summary>
+ public void ReattachVLines()
+ {
+ foreach (var lane in _lanes)
+ {
+ var vline = lane.Chart.Plot.Add.VerticalLine(0);
+ vline.Color = ScottPlot.Color.FromHex("#FFFFFF").WithAlpha(100);
+ vline.LineWidth = 1;
+ vline.LinePattern = ScottPlot.LinePattern.Dashed;
+ vline.IsVisible = false;
+ lane.VLine = vline;
+ }
+ _isRefreshing = false;
+ }
+
+ private void OnMouseMove(LaneInfo sourceLane, MouseEventArgs e)
+ {
+ if (_isRefreshing || sourceLane.VLine == null) return;
+
+ var now = DateTime.UtcNow;
+ if ((now - _lastUpdate).TotalMilliseconds < 16) return;
+ _lastUpdate = now;
+
+ var pos = e.GetPosition(sourceLane.Chart);
+ var dpi = VisualTreeHelper.GetDpi(sourceLane.Chart);
+ var pixel = new ScottPlot.Pixel(
+ (float)(pos.X * dpi.DpiScaleX),
+ (float)(pos.Y * dpi.DpiScaleY));
+ var mouseCoords = sourceLane.Chart.Plot.GetCoordinates(pixel);
+ double xValue = mouseCoords.X;
+
+ _tooltipText.Inlines.Clear();
+ var time = DateTime.FromOADate(xValue);
+ var displayTime = ServerTimeHelper.ConvertForDisplay(time, ServerTimeHelper.CurrentDisplayMode);
+ _tooltipText.Inlines.Add(new Run(displayTime.ToString("yyyy-MM-dd HH:mm:ss")));
+ if (_comparisonLabel != null)
+ _tooltipText.Inlines.Add(new Run($" (dashed = {_comparisonLabel})") { Foreground = DimBrush });
+
+ var defaultBrush = new SolidColorBrush(Color.FromRgb(0xE0, 0xE0, 0xE0));
+
+ foreach (var lane in _lanes)
+ {
+ if (lane.VLine == null) continue;
+
+ lane.VLine.IsVisible = true;
+ lane.VLine.X = xValue;
+
+ if (lane.Series.Count == 1)
+ {
+ var series = lane.Series[0];
+ double? value = FindNearestValue(series, xValue);
+
+ if (value.HasValue)
+ {
+ var indicator = GetBaselineIndicator(lane, value.Value);
+
+ // Tooltip: value + arrow + "30d avg" context
+ _tooltipText.Inlines.Add(new Run($"\n{lane.Label}: {value.Value:N1} {lane.Unit}") { Foreground = defaultBrush });
+ if (indicator != null)
+ {
+ _tooltipText.Inlines.Add(new Run($" {indicator.Value.Symbol}") { Foreground = indicator.Value.Brush });
+ }
+ }
+ else
+ {
+ _tooltipText.Inlines.Add(new Run($"\n{lane.Label}: —") { Foreground = defaultBrush });
+ }
+ }
+ else if (lane.Series.Count > 1)
+ {
+ foreach (var series in lane.Series)
+ {
+ double? value = FindNearestValue(series, xValue);
+ string unit = series.Unit ?? lane.Unit;
+ if (value.HasValue)
+ {
+ _tooltipText.Inlines.Add(new Run($"\n{series.Name}: {value.Value:N0} {unit}") { Foreground = defaultBrush });
+ var indicator = GetBaselineIndicator(lane, value.Value);
+ if (indicator != null)
+ _tooltipText.Inlines.Add(new Run($" {indicator.Value.Symbol}") { Foreground = indicator.Value.Brush });
+ }
+ else
+ _tooltipText.Inlines.Add(new Run($"\n{series.Name}: —") { Foreground = defaultBrush });
+ }
+ }
+ else
+ {
+ _tooltipText.Inlines.Add(new Run($"\n{lane.Label}: —") { Foreground = defaultBrush });
+ }
+
+ lane.Chart.Refresh();
+ }
+ _tooltip.PlacementTarget = sourceLane.Chart;
+ _tooltip.HorizontalOffset = pos.X + 15;
+ _tooltip.VerticalOffset = pos.Y + 15;
+ _tooltip.IsOpen = true;
+ }
+
+ private static double? FindNearestValue(DataSeries series, double targetX)
+ {
+ if (series.Times == null || series.Values == null || series.Times.Length == 0)
+ return null;
+
+ var times = series.Times;
+ var values = series.Values;
+
+ int lo = 0, hi = times.Length - 1;
+ while (lo < hi)
+ {
+ int mid = (lo + hi) / 2;
+ if (times[mid] < targetX)
+ lo = mid + 1;
+ else
+ hi = mid;
+ }
+
+ int best = lo;
+ if (lo > 0 && Math.Abs(times[lo - 1] - targetX) < Math.Abs(times[lo] - targetX))
+ best = lo - 1;
+
+ double val = values[best];
+ if (double.IsNaN(val)) return null;
+
+ if (series.IsEventBased)
+ {
+ double oneMinute = 1.0 / 1440.0;
+ if (Math.Abs(times[best] - targetX) > oneMinute)
+ return 0;
+ }
+
+ return val;
+ }
+
+ private static readonly SolidColorBrush RedBrush = new(Color.FromRgb(0xFF, 0x52, 0x52));
+ private static readonly SolidColorBrush GreenBrush = new(Color.FromRgb(0x69, 0xF0, 0x69));
+ private static readonly SolidColorBrush DimBrush = new(Color.FromRgb(0x90, 0x96, 0xA0));
+
+ private record struct BaselineIndicator(string Symbol, SolidColorBrush Brush);
+
+ private static string? FormatBaselineContext(LaneInfo lane)
+ {
+ if (lane.BaselineUpper == null || lane.BaselineLower == null) return null;
+ var mean = (lane.BaselineUpper.Value + lane.BaselineLower.Value) / 2.0;
+ var formatted = mean >= 1000 ? $"{mean:N0}" : mean >= 10 ? $"{mean:N1}" : $"{mean:N2}";
+ return $"30d avg: ~{formatted}";
+ }
+
+ private static BaselineIndicator? GetBaselineIndicator(LaneInfo lane, double value)
+ {
+ if (lane.BaselineUpper == null || lane.BaselineLower == null) return null;
+ // For event-based metrics (blocking/deadlocks): value significantly above
+ // the baseline mean is a spike, even if within the wide ± 2σ band.
+ // Uses 3x mean as threshold — if you normally see ~5 events and now see 20, that's a spike.
+ var mean = (lane.BaselineUpper.Value + lane.BaselineLower.Value) / 2.0;
+ if (lane.IsEventBased && value >= 1.0 && (mean < 1.0 || value > mean * 3))
+ return new BaselineIndicator("▲", RedBrush);
+ // ▲ requires both: outside band AND above absolute minimum (prevents 1% CPU false alarms)
+ if (value > lane.BaselineUpper.Value && value >= lane.MinAnomalyValue)
+ return new BaselineIndicator("▲", RedBrush);
+ // ▼ always shown when below band (drops are always interesting — tuning feedback)
+ if (value < lane.BaselineLower.Value)
+ return new BaselineIndicator("▼", GreenBrush);
+ return null;
+ }
+
+ private void OnMouseLeave()
+ {
+ _tooltip.IsOpen = false;
+ foreach (var lane in _lanes)
+ {
+ if (lane.VLine != null)
+ lane.VLine.IsVisible = false;
+ lane.Chart.Refresh();
+ }
+ }
+
+ public void Dispose()
+ {
+ _tooltip.IsOpen = false;
+ foreach (var lane in _lanes)
+ {
+ lane.Series.Clear();
+ lane.VLine = null;
+ }
+ _lanes.Clear();
+ }
+
+ private class DataSeries
+ {
+ public string Name { get; set; } = "";
+ public string? Unit { get; set; }
+ public double[]? Times { get; set; }
+ public double[]? Values { get; set; }
+ public bool IsEventBased { get; set; }
+ }
+
+ private class LaneInfo
+ {
+ public ScottPlot.WPF.WpfPlot Chart { get; set; } = null!;
+ public string Label { get; set; } = "";
+ public string Unit { get; set; } = "";
+ public ScottPlot.Plottables.VerticalLine? VLine { get; set; }
+ public List<DataSeries> Series { get; set; } = new();
+ public double? BaselineUpper { get; set; }
+ public double? BaselineLower { get; set; }
+ public double MinAnomalyValue { get; set; }
+ public bool IsEventBased { get; set; }
+ }
+}
diff --git a/Lite/Mcp/McpAnalysisTools.cs b/Lite/Mcp/McpAnalysisTools.cs
index 9544b46..c29dca0 100644
--- a/Lite/Mcp/McpAnalysisTools.cs
+++ b/Lite/Mcp/McpAnalysisTools.cs
@@ -9,7 +9,7 @@ namespace PerformanceMonitorLite.Mcp;
[McpServerToolType]
public sealed class McpAnalysisTools
{
- [McpServerTool(Name = "analyze_server"), Description("Runs the diagnostic inference engine against a server's collected data. Scores wait stats, blocking, memory, config, and other facts, then traverses a relationship graph to build evidence-backed stories about what's wrong and why. Returns structured findings with severity scores, evidence chains, and recommended next tools to call. The AI client should interpret the findings and provide recommendations — the engine provides the reasoning, not the prose.")]
+ [McpServerTool(Name = "analyze_server"), Description("Runs the diagnostic inference engine against a server's collected data. Scores wait stats, blocking, memory, config, and other facts, then traverses a relationship graph to build evidence-backed stories about what's wrong and why. Anomaly detection compares the analysis window against 30-day time-bucketed baselines (hour-of-day x day-of-week) to identify deviations that are unusual for this specific time slot, not just unusual overall. Returns structured findings with severity scores, evidence chains, baseline context for anomalies, and recommended next tools to call.")]
public static async Task AnalyzeServer(
AnalysisService analysisService,
ServerManager serverManager,
@@ -162,13 +162,13 @@ public static async Task GetAnalysisFacts(
}
}
- [McpServerTool(Name = "compare_analysis"), Description("Compares two time periods by running the inference engine's fact collection and scoring on each, then showing what changed. Use this to compare peak vs off-peak, before vs after a change, or yesterday vs today. Returns facts from both periods side-by-side with severity deltas.")]
+ [McpServerTool(Name = "compare_analysis"), Description("Compares two time periods by running the inference engine's fact collection and scoring on each, then showing what changed. Use this to compare peak vs off-peak, before vs after a change, or yesterday vs today. Returns facts from both periods side-by-side with severity deltas. Note: for routine anomaly detection, use analyze_server instead — it automatically compares against 30-day time-bucketed baselines (hour-of-day x day-of-week). This tool is for explicit window-to-window comparisons.")]
public static async Task CompareAnalysis(
AnalysisService analysisService,
ServerManager serverManager,
[Description("Server name or display name.")] string? server_name = null,
[Description("Hours back for the comparison (recent) period. Default 4.")] int hours_back = 4,
- [Description("Hours back for the baseline period start, measured from now. Default 28 (yesterday same time, assuming 4-hour windows). The baseline period will be the same duration as the comparison period.")] int baseline_hours_back = 28)
+ [Description("Hours back for the baseline period start, measured from now. Default 28 (yesterday same time). The baseline period will be the same duration as the comparison period.")] int baseline_hours_back = 28)
{
var resolved = ServerResolver.Resolve(serverManager, server_name);
if (resolved == null)
@@ -833,6 +833,48 @@ public static List GetForStoryPath(string storyPath)
return result;
}
+
+ private static readonly string[] DayNames = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"];
+
+ /// <summary>
+ /// Formats baseline context from anomaly fact metadata into a human-readable object
+ /// for MCP output. Example: "4.1σ above baseline for Tue 14:00, mean 68.2"
+ /// </summary>
+ private static object? FormatBaselineContext(Dictionary metadata)
+ {
+ var result = new Dictionary();
+
+ if (metadata.TryGetValue("deviation_sigma", out var sigma))
+ result["deviation"] = $"{sigma:F1}σ";
+
+ if (metadata.TryGetValue("ratio", out var ratio))
+ result["ratio"] = $"{ratio:F1}x";
+
+ if (metadata.TryGetValue("baseline_mean", out var mean))
+ result["baseline_mean"] = Math.Round(mean, 2);
+
+ if (metadata.TryGetValue("baseline_mean_ms", out var meanMs))
+ result["baseline_mean"] = Math.Round(meanMs, 2);
+
+ if (metadata.TryGetValue("baseline_stddev", out var stddev))
+ result["baseline_stddev"] = Math.Round(stddev, 2);
+
+ if (metadata.TryGetValue("baseline_hour", out var hour) &&
+ metadata.TryGetValue("baseline_dow", out var dow))
+ {
+ var dowIdx = (int)dow;
+ var dayName = dowIdx >= 0 && dowIdx < DayNames.Length ? DayNames[dowIdx] : "?";
+ result["bucket"] = hour >= 0 ? $"{dayName} {(int)hour:00}:00" : "flat";
+ }
+
+ if (metadata.TryGetValue("baseline_tier", out var tier))
+ result["tier"] = tier switch { 0 => "full", 1 => "hour_only", _ => "flat" };
+
+ if (metadata.TryGetValue("baseline_samples", out var samples))
+ result["baseline_samples"] = (int)samples;
+
+ return result.Count > 0 ? result : null;
+ }
}
internal record ToolRecommendation(
diff --git a/Lite/Models/ComparisonItemBase.cs b/Lite/Models/ComparisonItemBase.cs
new file mode 100644
index 0000000..e5271a8
--- /dev/null
+++ b/Lite/Models/ComparisonItemBase.cs
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2026 Erik Darling, Darling Data LLC
+ *
+ * This file is part of the SQL Server Performance Monitor Lite.
+ *
+ * Licensed under the MIT License. See LICENSE file in the project root for full license information.
+ */
+
+using System.Windows.Media;
+
+namespace PerformanceMonitorLite.Models;
+
+public abstract class ComparisonItemBase
+{
+ public string DatabaseName { get; set; } = "";
+
+ // Current period
+ public long ExecutionCount { get; set; }
+ public double AvgDurationMs { get; set; }
+ public double AvgCpuMs { get; set; }
+ public double AvgReads { get; set; }
+
+ // Baseline period
+ public long BaselineExecutionCount { get; set; }
+ public double BaselineAvgDurationMs { get; set; }
+ public double BaselineAvgCpuMs { get; set; }
+ public double BaselineAvgReads { get; set; }
+
+ // Flags
+ public bool IsNew => ExecutionCount > 0 && BaselineExecutionCount == 0;
+ public bool IsGone => ExecutionCount == 0 && BaselineExecutionCount > 0;
+
+ // Delta percentages (null when baseline is zero or item is new/gone)
+ public double? DurationDeltaPct => ComputeDelta(AvgDurationMs, BaselineAvgDurationMs);
+ public double? CpuDeltaPct => ComputeDelta(AvgCpuMs, BaselineAvgCpuMs);
+ public double? ReadsDeltaPct => ComputeDelta(AvgReads, BaselineAvgReads);
+ public double? ExecutionDeltaPct => ComputeDelta(ExecutionCount, BaselineExecutionCount);
+
+ // Display helpers for grid binding
+ public string DurationDeltaDisplay => FormatDelta(DurationDeltaPct);
+ public string CpuDeltaDisplay => FormatDelta(CpuDeltaPct);
+ public string ReadsDeltaDisplay => FormatDelta(ReadsDeltaPct);
+ public string ExecutionDeltaDisplay => FormatDelta(ExecutionDeltaPct);
+
+ public string StatusBadge => IsNew ? "NEW" : IsGone ? "GONE" : "";
+
+ // Sort key: NEW at top (0), normal by delta (1), GONE at bottom (2)
+ public int SortGroup => IsNew ? 0 : IsGone ? 2 : 1;
+ public double SortableDurationDelta => DurationDeltaPct ?? (IsNew ? double.MaxValue : double.MinValue);
+
+ // Color brushes for delta columns (red = regression, green = improvement)
+ public Brush DurationDeltaBrush => GetDeltaBrush(DurationDeltaPct, 25);
+ public Brush CpuDeltaBrush => GetDeltaBrush(CpuDeltaPct, 25);
+ public Brush ReadsDeltaBrush => GetDeltaBrush(ReadsDeltaPct, 50, 25);
+ public Brush ExecutionDeltaBrush => GetDeltaBrush(ExecutionDeltaPct, 100, 50);
+
+ private static readonly Brush RedBrush = new SolidColorBrush(Color.FromRgb(0xFF, 0x6B, 0x6B));
+ private static readonly Brush GreenBrush = new SolidColorBrush(Color.FromRgb(0x4E, 0xC9, 0xB0));
+ private static readonly Brush NeutralBrush = Brushes.Transparent;
+
+ static ComparisonItemBase()
+ {
+ RedBrush.Freeze();
+ GreenBrush.Freeze();
+ }
+
+ private static Brush GetDeltaBrush(double? delta, double redThreshold, double greenThreshold = 25)
+ {
+ if (!delta.HasValue) return NeutralBrush;
+ if (delta.Value > redThreshold) return RedBrush;
+ if (delta.Value < -greenThreshold) return GreenBrush;
+ return NeutralBrush;
+ }
+
+ private static double? ComputeDelta(double current, double baseline)
+ {
+ if (baseline == 0) return null;
+ return (current - baseline) / baseline * 100.0;
+ }
+
+ private static string FormatDelta(double? delta)
+ {
+ if (!delta.HasValue) return "\u2014";
+ var sign = delta.Value >= 0 ? "+" : "";
+ return $"{sign}{delta.Value:N1}%";
+ }
+}
diff --git a/Lite/Models/PlanModels.cs b/Lite/Models/PlanModels.cs
index a6e62f7..50a3392 100644
--- a/Lite/Models/PlanModels.cs
+++ b/Lite/Models/PlanModels.cs
@@ -251,6 +251,10 @@ public class PlanNode
public List Warnings { get; set; } = new();
public bool HasWarnings => Warnings.Count > 0;
+ // Modification operator: nonclustered indexes maintained
+ public int NonClusteredIndexCount { get; set; }
+ public List NonClusteredIndexNames { get; set; } = new();
+
// Tree structure
public List Children { get; set; } = new();
public PlanNode? Parent { get; set; }
diff --git a/Lite/Models/ProcedureStatsComparisonItem.cs b/Lite/Models/ProcedureStatsComparisonItem.cs
new file mode 100644
index 0000000..8c010b6
--- /dev/null
+++ b/Lite/Models/ProcedureStatsComparisonItem.cs
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2026 Erik Darling, Darling Data LLC
+ *
+ * This file is part of the SQL Server Performance Monitor Lite.
+ *
+ * Licensed under the MIT License. See LICENSE file in the project root for full license information.
+ */
+
+namespace PerformanceMonitorLite.Models;
+
+public class ProcedureStatsComparisonItem : ComparisonItemBase
+{
+ public string SchemaName { get; set; } = "";
+ public string ObjectName { get; set; } = "";
+ public string FullName => string.IsNullOrEmpty(SchemaName) ? ObjectName : $"{SchemaName}.{ObjectName}";
+}
diff --git a/Lite/Models/QueryStatsComparisonItem.cs b/Lite/Models/QueryStatsComparisonItem.cs
new file mode 100644
index 0000000..531443e
--- /dev/null
+++ b/Lite/Models/QueryStatsComparisonItem.cs
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2026 Erik Darling, Darling Data LLC
+ *
+ * This file is part of the SQL Server Performance Monitor Lite.
+ *
+ * Licensed under the MIT License. See LICENSE file in the project root for full license information.
+ */
+
+namespace PerformanceMonitorLite.Models;
+
+public class QueryStatsComparisonItem : ComparisonItemBase
+{
+ public string QueryHash { get; set; } = "";
+ public string QueryText { get; set; } = "";
+}
diff --git a/Lite/PerformanceMonitorLite.csproj b/Lite/PerformanceMonitorLite.csproj
index 5ccd3c3..d87dde0 100644
--- a/Lite/PerformanceMonitorLite.csproj
+++ b/Lite/PerformanceMonitorLite.csproj
@@ -8,10 +8,10 @@
PerformanceMonitorLite
PerformanceMonitorLite
SQL Server Performance Monitor Lite
- 2.5.0
- 2.5.0.0
- 2.5.0.0
- 2.5.0
+ 2.6.0
+ 2.6.0.0
+ 2.6.0.0
+ 2.6.0
Darling Data, LLC
Copyright © 2026 Darling Data, LLC
Lightweight SQL Server performance monitoring - no installation required on target servers
diff --git a/Lite/Services/ArchiveService.cs b/Lite/Services/ArchiveService.cs
index 9f5f0bb..d286589 100644
--- a/Lite/Services/ArchiveService.cs
+++ b/Lite/Services/ArchiveService.cs
@@ -305,18 +305,8 @@ private void CompactParquetFiles()
}
}
- /* Compact each group that has more than one file (or any non-monthly files) */
- using var con = new DuckDBConnection("DataSource=:memory:");
- con.Open();
-
- /* Cap memory to avoid multi-GB spikes decompressing large parquet archives.
- DuckDB will spill excess to its temp directory automatically. */
- using (var pragma = con.CreateCommand())
- {
- pragma.CommandText = "SET memory_limit = '2GB'; SET preserve_insertion_order = false;";
- pragma.ExecuteNonQuery();
- }
-
+ /* Compact each group that has more than one file (or any non-monthly files).
+ Each group gets its own DuckDB connection so memory is fully released between groups. */
var totalMerged = 0;
var totalRemoved = 0;
@@ -346,16 +336,16 @@ DuckDB will spill excess to its temp directory automatically. */
var sourcePaths = files
.Select(f => Path.Combine(_archivePath, f).Replace("\\", "/"))
.ToList();
- var pathList = string.Join(", ", sourcePaths.Select(p => $"'{p}'"));
- /* Build SELECT with column exclusions for specific tables.
- Only exclude columns that actually exist in the source files
- (they may have been stripped in a previous compaction). */
+ /* Determine column exclusions up front using all source files */
var selectClause = "*";
if (CompactionExcludeColumns.TryGetValue(table, out var excludeCols))
{
- using var schemaCmd = con.CreateCommand();
- schemaCmd.CommandText = $"SELECT column_name FROM (DESCRIBE SELECT * FROM read_parquet([{pathList}], union_by_name=true))";
+ using var schemaCon = new DuckDBConnection("DataSource=:memory:");
+ schemaCon.Open();
+ var allPathList = string.Join(", ", sourcePaths.Select(p => $"'{p}'"));
+ using var schemaCmd = schemaCon.CreateCommand();
+ schemaCmd.CommandText = $"SELECT column_name FROM (DESCRIBE SELECT * FROM read_parquet([{allPathList}], union_by_name=true))";
using var reader = schemaCmd.ExecuteReader();
var existingCols = new HashSet(StringComparer.OrdinalIgnoreCase);
while (reader.Read()) existingCols.Add(reader.GetString(0));
@@ -367,10 +357,65 @@ Only exclude columns that actually exist in the source files
}
}
- using var cmd = con.CreateCommand();
- cmd.CommandText = $"COPY (SELECT {selectClause} FROM read_parquet([{pathList}], union_by_name=true)) " +
- $"TO '{tempPath}' (FORMAT PARQUET, COMPRESSION ZSTD, ROW_GROUP_SIZE 122880)";
- cmd.ExecuteNonQuery();
+ if (sourcePaths.Count <= 2)
+ {
+ /* Small group — single-pass merge */
+ using var con = new DuckDBConnection("DataSource=:memory:");
+ con.Open();
+ using (var pragma = con.CreateCommand())
+ {
+ pragma.CommandText = "SET memory_limit = '4GB'; SET preserve_insertion_order = false;";
+ pragma.ExecuteNonQuery();
+ }
+
+ var pathList = string.Join(", ", sourcePaths.Select(p => $"'{p}'"));
+ using var cmd = con.CreateCommand();
+ cmd.CommandText = $"COPY (SELECT {selectClause} FROM read_parquet([{pathList}], union_by_name=true)) " +
+ $"TO '{tempPath}' (FORMAT PARQUET, COMPRESSION ZSTD, ROW_GROUP_SIZE 122880)";
+ cmd.ExecuteNonQuery();
+ }
+ else
+ {
+ /* Large group — incremental merge (pairs) to keep peak memory low.
+ Sort smallest-first so early merges are cheap. */
+ var sorted = sourcePaths
+ .OrderBy(p => new FileInfo(p.Replace("/", "\\")).Length)
+ .ToList();
+
+ var currentPath = sorted[0];
+ var intermediateFiles = new List();
+
+ for (var i = 1; i < sorted.Count; i++)
+ {
+ var stepOutput = i < sorted.Count - 1
+ ? targetPath + $".step{i}.tmp"
+ : tempPath;
+
+ using var con = new DuckDBConnection("DataSource=:memory:");
+ con.Open();
+ using (var pragma = con.CreateCommand())
+ {
+ pragma.CommandText = "SET memory_limit = '4GB'; SET preserve_insertion_order = false;";
+ pragma.ExecuteNonQuery();
+ }
+
+ var pairList = $"'{currentPath}', '{sorted[i]}'";
+ using var cmd = con.CreateCommand();
+ cmd.CommandText = $"COPY (SELECT {selectClause} FROM read_parquet([{pairList}], union_by_name=true)) " +
+ $"TO '{stepOutput}' (FORMAT PARQUET, COMPRESSION ZSTD, ROW_GROUP_SIZE 122880)";
+ cmd.ExecuteNonQuery();
+
+ /* Clean up previous intermediate file */
+ if (intermediateFiles.Count > 0)
+ {
+ var prev = intermediateFiles[^1];
+ try { File.Delete(prev); } catch { /* best effort */ }
+ }
+
+ intermediateFiles.Add(stepOutput);
+ currentPath = stepOutput;
+ }
+ }
/* Remove originals */
var removed = 0;
@@ -404,11 +449,15 @@ Only exclude columns that actually exist in the source files
{
_logger?.LogError(ex, "Failed to compact {Month}/{Table} ({Count} files)", month, table, files.Count);
- /* Clean up temp file on failure */
+ /* Clean up temp and intermediate files on failure */
if (File.Exists(tempPath))
{
try { File.Delete(tempPath); } catch { /* best effort */ }
}
+ foreach (var stepFile in Directory.GetFiles(_archivePath, $"{targetMonth}_{table}.parquet.step*.tmp"))
+ {
+ try { File.Delete(stepFile); } catch { /* best effort */ }
+ }
}
}
diff --git a/Lite/Services/EmailTemplateBuilder.cs b/Lite/Services/EmailTemplateBuilder.cs
index 7dd5596..744d24d 100644
--- a/Lite/Services/EmailTemplateBuilder.cs
+++ b/Lite/Services/EmailTemplateBuilder.cs
@@ -116,7 +116,7 @@ private static string BuildHtmlBody(
sb.Append($"");
sb.Append("");
sb.Append($"SQL Server Performance Monitor ");
- sb.Append($"{WebUtility.HtmlEncode(EditionName)} ");
+ sb.Append($"{WebUtility.HtmlEncode(EditionName)} ");
sb.Append("
");
sb.Append("
");
@@ -167,7 +167,7 @@ private static string BuildHtmlBody(
/* Footer */
sb.Append("");
- sb.Append($"");
+ sb.Append($"");
sb.Append($"Sent by {WebUtility.HtmlEncode(EditionName)}");
if (!isTest)
{
@@ -200,7 +200,7 @@ private static void AppendDetailSection(StringBuilder sb, AlertContext context)
/* Separator + heading */
sb.Append(" ");
sb.Append("");
- sb.Append($"RECENT EVENTS ");
+ sb.Append($"RECENT EVENTS ");
sb.Append(" ");
foreach (var item in context.Details)
diff --git a/Lite/Services/LocalDataService.Baselines.cs b/Lite/Services/LocalDataService.Baselines.cs
new file mode 100644
index 0000000..93fd344
--- /dev/null
+++ b/Lite/Services/LocalDataService.Baselines.cs
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2026 Erik Darling, Darling Data LLC
+ *
+ * This file is part of the SQL Server Performance Monitor Lite.
+ *
+ * Licensed under the MIT License. See LICENSE file in the project root for full license information.
+ */
+
+using PerformanceMonitorLite.Analysis;
+
+namespace PerformanceMonitorLite.Services;
+
+public partial class LocalDataService
+{
+ private BaselineProvider? _baselineProvider;
+
+ private BaselineProvider GetBaselineProvider()
+ {
+ return _baselineProvider ??= new BaselineProvider(_duckDb);
+ }
+
+ /// <summary>
+ /// Gets the baseline (mean ± stddev) for a metric at a specific time.
+ /// Returns BaselineBucket.Empty if no baseline data is available.
+ /// </summary>
+ public async Task GetBaselineForLaneAsync(
+ int serverId, string metricName, DateTime referenceTime)
+ {
+ var baseline = await GetBaselineProvider().GetBaselineAsync(serverId, metricName, referenceTime);
+ return baseline.SampleCount > 0 ? baseline : BaselineBucket.Empty;
+ }
+}
diff --git a/Lite/Services/LocalDataService.Blocking.cs b/Lite/Services/LocalDataService.Blocking.cs
index ee9037c..73556eb 100644
--- a/Lite/Services/LocalDataService.Blocking.cs
+++ b/Lite/Services/LocalDataService.Blocking.cs
@@ -564,7 +564,7 @@ GROUP BY DATE_TRUNC('minute', event_time)
}
///
- /// Gets deadlock trend (count of deadlocks per hour bucket).
+ /// Gets deadlock trend (count of deadlocks per minute bucket).
///
public async Task> GetDeadlockTrendAsync(int serverId, int hoursBack = 24, DateTime? fromDate = null, DateTime? toDate = null)
{
@@ -579,13 +579,13 @@ public async Task> GetDeadlockTrendAsync(int serverId, int hour
deadlock_count
FROM (
SELECT
- DATE_TRUNC('hour', deadlock_time) AS bucket,
+ DATE_TRUNC('minute', deadlock_time) AS bucket,
COUNT(*) AS deadlock_count
FROM v_deadlocks
WHERE server_id = $1
AND collection_time >= $2
AND collection_time <= $3
- GROUP BY DATE_TRUNC('hour', deadlock_time)
+ GROUP BY DATE_TRUNC('minute', deadlock_time)
) sub
ORDER BY bucket";
diff --git a/Lite/Services/LocalDataService.CollectionHealth.cs b/Lite/Services/LocalDataService.CollectionHealth.cs
index 19c47eb..e28338f 100644
--- a/Lite/Services/LocalDataService.CollectionHealth.cs
+++ b/Lite/Services/LocalDataService.CollectionHealth.cs
@@ -189,6 +189,19 @@ public class CollectionLogRow
public class CollectorHealthRow
{
+ /// <summary>
+ /// On-load collectors run once per tab open, not on the scheduled loop.
+ /// Staleness thresholds don't apply to them.
+ /// </summary>
+ private static readonly HashSet OnLoadCollectors = new(StringComparer.OrdinalIgnoreCase)
+ {
+ "server_config",
+ "database_config",
+ "database_scoped_config",
+ "trace_flags",
+ "server_properties"
+ };
+
public string CollectorName { get; set; } = "";
public long TotalRuns { get; set; }
public long SuccessCount { get; set; }
@@ -211,6 +224,11 @@ public string HealthStatus
{
if (TotalRuns == 0) return "NEVER_RUN";
if (PermissionDeniedCount > 0 && ErrorCount == 0 && SuccessCount == 0) return "NO_PERMISSIONS";
+ if (OnLoadCollectors.Contains(CollectorName))
+ {
+ if (FailureRatePercent > 20) return "WARNING";
+ return "HEALTHY";
+ }
if (HoursSinceLastSuccess > 24) return "FAILING";
if (HoursSinceLastSuccess > 4) return "STALE";
if (FailureRatePercent > 20) return "WARNING";
diff --git a/Lite/Services/LocalDataService.FinOps.cs b/Lite/Services/LocalDataService.FinOps.cs
index 1dc7101..73bdbe5 100644
--- a/Lite/Services/LocalDataService.FinOps.cs
+++ b/Lite/Services/LocalDataService.FinOps.cs
@@ -1569,23 +1569,42 @@ public async Task> GetRecommendationsAsync(int serverId,
if (edition.Contains("Enterprise", StringComparison.OrdinalIgnoreCase))
{
+ /*
+ sys.dm_db_persisted_sku_features is database-scoped on all versions.
+ Query across all online user databases for TDE usage — the only feature
+ still Enterprise-only since 2016 SP1 (Compression, Partitioning,
+ ColumnStoreIndex are all available in Standard).
+ */
using var featCmd = new SqlCommand(@"
+DECLARE
+ @sql nvarchar(max) = N'';
+
SELECT
- DB_NAME(database_id) AS database_name,
- feature_name
-FROM sys.dm_db_persisted_sku_features", sqlConn);
+ @sql += N'
+SELECT ' + QUOTENAME(name, '''') + N' AS database_name
+FROM ' + QUOTENAME(name) + N'.sys.dm_db_persisted_sku_features
+WHERE feature_name = N''TransparentDataEncryption''
+UNION ALL'
+FROM sys.databases
+WHERE database_id > 4
+AND state_desc = N'ONLINE';
+
+IF @sql <> N''
+BEGIN
+ SET @sql = LEFT(@sql, LEN(@sql) - 10);
+ EXEC sys.sp_executesql @sql;
+END;", sqlConn);
featCmd.CommandTimeout = 30;
- var features = new List();
+ var tdeDbNames = new List();
using var featReader = await featCmd.ExecuteReaderAsync();
while (await featReader.ReadAsync())
{
- var db = featReader.IsDBNull(0) ? "" : featReader.GetString(0);
- var feat = featReader.IsDBNull(1) ? "" : featReader.GetString(1);
- features.Add($"{db}: {feat}");
+ if (!featReader.IsDBNull(0))
+ tdeDbNames.Add(featReader.GetString(0));
}
- if (features.Count == 0)
+ if (tdeDbNames.Count == 0)
{
recommendations.Add(new RecommendationRow
{
@@ -1593,23 +1612,23 @@ public async Task> GetRecommendationsAsync(int serverId,
Severity = "High",
Confidence = "High",
Finding = "Enterprise Edition with no Enterprise-only features detected",
- Detail = "sys.dm_db_persisted_sku_features reports no Enterprise-only feature usage. " +
+ Detail = "No databases use Transparent Data Encryption (TDE), the only feature " +
+ "still restricted to Enterprise Edition since SQL Server 2016 SP1. " +
"Review whether Standard Edition would meet workload requirements for potential license savings.",
EstMonthlySavings = monthlyCost > 0 ? monthlyCost * 0.40m : null
});
}
else
{
- // Check 8: Enterprise feature detail report — list what blocks a downgrade
recommendations.Add(new RecommendationRow
{
Category = "Licensing",
Severity = "Low",
Confidence = "High",
- Finding = "Enterprise features in use — downgrade blockers identified",
- Detail = $"The following databases use Enterprise-only features: {string.Join("; ", features.Take(20))}" +
- (features.Count > 20 ? $" and {features.Count - 20} more" : "") +
- ". Address these before considering a Standard Edition downgrade."
+ Finding = "TDE in use — Enterprise Edition downgrade blocker",
+ Detail = $"The following databases use Transparent Data Encryption: {string.Join(", ", tdeDbNames.Take(20))}" +
+ (tdeDbNames.Count > 20 ? $" and {tdeDbNames.Count - 20} more" : "") +
+ ". TDE must be removed before downgrading to Standard Edition."
});
// Check 10: License cost impact estimate (only when features ARE in use)
diff --git a/Lite/Services/LocalDataService.QueryStats.cs b/Lite/Services/LocalDataService.QueryStats.cs
index 9b3fc93..4822c0a 100644
--- a/Lite/Services/LocalDataService.QueryStats.cs
+++ b/Lite/Services/LocalDataService.QueryStats.cs
@@ -184,6 +184,122 @@ ORDER BY SUM(delta_elapsed_time) DESC
return items;
}
+ /// <summary>
+ /// Gets query stats comparison between a current time range and a baseline range.
+ /// Returns delta percentages for duration, CPU, reads, and execution count.
+ /// </summary>
+ public async Task> GetQueryStatsComparisonAsync(
+ int serverId,
+ DateTime currentStart, DateTime currentEnd,
+ DateTime baselineStart, DateTime baselineEnd)
+ {
+ using var _q = TimeQuery("GetQueryStatsComparisonAsync", "v_query_stats comparison");
+ using var connection = await OpenConnectionAsync();
+ using var command = connection.CreateCommand();
+
+ command.CommandText = @"
+WITH top_current AS (
+ SELECT query_hash, database_name
+ FROM v_query_stats
+ WHERE server_id = $1
+ AND collection_time >= $2 AND collection_time <= $3
+ AND delta_execution_count > 0
+ GROUP BY query_hash, database_name
+ ORDER BY SUM(delta_execution_count) DESC
+ LIMIT 100
+),
+top_baseline AS (
+ SELECT query_hash, database_name
+ FROM v_query_stats
+ WHERE server_id = $1
+ AND collection_time >= $4 AND collection_time <= $5
+ AND delta_execution_count > 0
+ GROUP BY query_hash, database_name
+ ORDER BY SUM(delta_execution_count) DESC
+ LIMIT 100
+),
+top_hashes AS (
+ SELECT DISTINCT query_hash, database_name
+ FROM (
+ SELECT * FROM top_current
+ UNION ALL
+ SELECT * FROM top_baseline
+ ) combined
+),
+current_period AS (
+ SELECT th.database_name, th.query_hash,
+ SUM(qs.delta_execution_count) AS exec_count,
+ SUM(qs.delta_elapsed_time)::DOUBLE / NULLIF(SUM(qs.delta_execution_count), 0) / 1000.0 AS avg_duration_ms,
+ SUM(qs.delta_worker_time)::DOUBLE / NULLIF(SUM(qs.delta_execution_count), 0) / 1000.0 AS avg_cpu_ms,
+ SUM(qs.delta_physical_reads)::DOUBLE / NULLIF(SUM(qs.delta_execution_count), 0) AS avg_reads,
+ MAX(qs.query_text) AS query_text
+ FROM top_hashes th
+ INNER JOIN v_query_stats qs
+ ON qs.query_hash IS NOT DISTINCT FROM th.query_hash
+ AND qs.database_name IS NOT DISTINCT FROM th.database_name
+ WHERE qs.server_id = $1
+ AND qs.collection_time >= $2 AND qs.collection_time <= $3
+ AND qs.delta_execution_count > 0
+ GROUP BY th.database_name, th.query_hash
+),
+baseline_period AS (
+ SELECT th.database_name, th.query_hash,
+ SUM(qs.delta_execution_count) AS exec_count,
+ SUM(qs.delta_elapsed_time)::DOUBLE / NULLIF(SUM(qs.delta_execution_count), 0) / 1000.0 AS avg_duration_ms,
+ SUM(qs.delta_worker_time)::DOUBLE / NULLIF(SUM(qs.delta_execution_count), 0) / 1000.0 AS avg_cpu_ms,
+ SUM(qs.delta_physical_reads)::DOUBLE / NULLIF(SUM(qs.delta_execution_count), 0) AS avg_reads,
+ MAX(qs.query_text) AS query_text
+ FROM top_hashes th
+ INNER JOIN v_query_stats qs
+ ON qs.query_hash IS NOT DISTINCT FROM th.query_hash
+ AND qs.database_name IS NOT DISTINCT FROM th.database_name
+ WHERE qs.server_id = $1
+ AND qs.collection_time >= $4 AND qs.collection_time <= $5
+ AND qs.delta_execution_count > 0
+ GROUP BY th.database_name, th.query_hash
+)
+SELECT COALESCE(c.database_name, b.database_name) AS database_name,
+ COALESCE(c.query_hash, b.query_hash) AS query_hash,
+ COALESCE(c.query_text, b.query_text) AS query_text,
+ c.exec_count, c.avg_duration_ms, c.avg_cpu_ms, c.avg_reads,
+ b.exec_count AS baseline_exec_count,
+ b.avg_duration_ms AS baseline_avg_duration_ms,
+ b.avg_cpu_ms AS baseline_avg_cpu_ms,
+ b.avg_reads AS baseline_avg_reads
+FROM current_period c
+FULL OUTER JOIN baseline_period b
+ ON c.database_name IS NOT DISTINCT FROM b.database_name
+ AND c.query_hash IS NOT DISTINCT FROM b.query_hash;";
+
+ command.Parameters.Add(new DuckDBParameter { Value = serverId });
+ command.Parameters.Add(new DuckDBParameter { Value = currentStart });
+ command.Parameters.Add(new DuckDBParameter { Value = currentEnd });
+ command.Parameters.Add(new DuckDBParameter { Value = baselineStart });
+ command.Parameters.Add(new DuckDBParameter { Value = baselineEnd });
+
+ var items = new List();
+ using var reader = await command.ExecuteReaderAsync();
+ while (await reader.ReadAsync())
+ {
+ items.Add(new Models.QueryStatsComparisonItem
+ {
+ DatabaseName = reader.IsDBNull(0) ? "" : reader.GetString(0),
+ QueryHash = reader.IsDBNull(1) ? "" : reader.GetString(1),
+ QueryText = reader.IsDBNull(2) ? "" : reader.GetString(2),
+ ExecutionCount = reader.IsDBNull(3) ? 0 : ToInt64(reader.GetValue(3)),
+ AvgDurationMs = reader.IsDBNull(4) ? 0 : ToDouble(reader.GetValue(4)),
+ AvgCpuMs = reader.IsDBNull(5) ? 0 : ToDouble(reader.GetValue(5)),
+ AvgReads = reader.IsDBNull(6) ? 0 : ToDouble(reader.GetValue(6)),
+ BaselineExecutionCount = reader.IsDBNull(7) ? 0 : ToInt64(reader.GetValue(7)),
+ BaselineAvgDurationMs = reader.IsDBNull(8) ? 0 : ToDouble(reader.GetValue(8)),
+ BaselineAvgCpuMs = reader.IsDBNull(9) ? 0 : ToDouble(reader.GetValue(9)),
+ BaselineAvgReads = reader.IsDBNull(10) ? 0 : ToDouble(reader.GetValue(10)),
+ });
+ }
+
+ return items;
+ }
+
///
/// Gets collection-level history for a specific query hash (for drilldown).
///
@@ -574,6 +690,123 @@ ORDER BY SUM(delta_elapsed_time) DESC
return items;
}
+
+ /// <summary>
+ /// Gets procedure stats comparison between a current time range and a baseline range.
+ /// </summary>
+ public async Task> GetProcedureStatsComparisonAsync(
+ int serverId,
+ DateTime currentStart, DateTime currentEnd,
+ DateTime baselineStart, DateTime baselineEnd)
+ {
+ using var _q = TimeQuery("GetProcedureStatsComparisonAsync", "v_procedure_stats comparison");
+ using var connection = await OpenConnectionAsync();
+ using var command = connection.CreateCommand();
+
+ command.CommandText = @"
+WITH top_current AS (
+ SELECT database_name, schema_name, object_name
+ FROM v_procedure_stats
+ WHERE server_id = $1
+ AND collection_time >= $2 AND collection_time <= $3
+ AND delta_execution_count > 0
+ GROUP BY database_name, schema_name, object_name
+ ORDER BY SUM(delta_execution_count) DESC
+ LIMIT 100
+),
+top_baseline AS (
+ SELECT database_name, schema_name, object_name
+ FROM v_procedure_stats
+ WHERE server_id = $1
+ AND collection_time >= $4 AND collection_time <= $5
+ AND delta_execution_count > 0
+ GROUP BY database_name, schema_name, object_name
+ ORDER BY SUM(delta_execution_count) DESC
+ LIMIT 100
+),
+top_procs AS (
+ SELECT DISTINCT database_name, schema_name, object_name
+ FROM (
+ SELECT * FROM top_current
+ UNION ALL
+ SELECT * FROM top_baseline
+ ) combined
+),
+current_period AS (
+ SELECT tp.database_name, tp.schema_name, tp.object_name,
+ SUM(ps.delta_execution_count) AS exec_count,
+ SUM(ps.delta_elapsed_time)::DOUBLE / NULLIF(SUM(ps.delta_execution_count), 0) / 1000.0 AS avg_duration_ms,
+ SUM(ps.delta_worker_time)::DOUBLE / NULLIF(SUM(ps.delta_execution_count), 0) / 1000.0 AS avg_cpu_ms,
+ SUM(ps.delta_physical_reads)::DOUBLE / NULLIF(SUM(ps.delta_execution_count), 0) AS avg_reads
+ FROM top_procs tp
+ INNER JOIN v_procedure_stats ps
+ ON ps.database_name IS NOT DISTINCT FROM tp.database_name
+ AND ps.schema_name IS NOT DISTINCT FROM tp.schema_name
+ AND ps.object_name IS NOT DISTINCT FROM tp.object_name
+ WHERE ps.server_id = $1
+ AND ps.collection_time >= $2 AND ps.collection_time <= $3
+ AND ps.delta_execution_count > 0
+ GROUP BY tp.database_name, tp.schema_name, tp.object_name
+),
+baseline_period AS (
+ SELECT tp.database_name, tp.schema_name, tp.object_name,
+ SUM(ps.delta_execution_count) AS exec_count,
+ SUM(ps.delta_elapsed_time)::DOUBLE / NULLIF(SUM(ps.delta_execution_count), 0) / 1000.0 AS avg_duration_ms,
+ SUM(ps.delta_worker_time)::DOUBLE / NULLIF(SUM(ps.delta_execution_count), 0) / 1000.0 AS avg_cpu_ms,
+ SUM(ps.delta_physical_reads)::DOUBLE / NULLIF(SUM(ps.delta_execution_count), 0) AS avg_reads
+ FROM top_procs tp
+ INNER JOIN v_procedure_stats ps
+ ON ps.database_name IS NOT DISTINCT FROM tp.database_name
+ AND ps.schema_name IS NOT DISTINCT FROM tp.schema_name
+ AND ps.object_name IS NOT DISTINCT FROM tp.object_name
+ WHERE ps.server_id = $1
+ AND ps.collection_time >= $4 AND ps.collection_time <= $5
+ AND ps.delta_execution_count > 0
+ GROUP BY tp.database_name, tp.schema_name, tp.object_name
+)
+SELECT COALESCE(c.database_name, b.database_name) AS database_name,
+ COALESCE(c.schema_name, b.schema_name) AS schema_name,
+ COALESCE(c.object_name, b.object_name) AS object_name,
+ c.exec_count, c.avg_duration_ms, c.avg_cpu_ms, c.avg_reads,
+ b.exec_count AS baseline_exec_count,
+ b.avg_duration_ms AS baseline_avg_duration_ms,
+ b.avg_cpu_ms AS baseline_avg_cpu_ms,
+ b.avg_reads AS baseline_avg_reads
+FROM current_period c
+FULL OUTER JOIN baseline_period b
+ ON c.database_name IS NOT DISTINCT FROM b.database_name
+ AND c.schema_name IS NOT DISTINCT FROM b.schema_name
+ AND c.object_name IS NOT DISTINCT FROM b.object_name;";
+
+ command.Parameters.Add(new DuckDBParameter { Value = serverId });
+ command.Parameters.Add(new DuckDBParameter { Value = currentStart });
+ command.Parameters.Add(new DuckDBParameter { Value = currentEnd });
+ command.Parameters.Add(new DuckDBParameter { Value = baselineStart });
+ command.Parameters.Add(new DuckDBParameter { Value = baselineEnd });
+
+ var items = new List();
+ using var reader = await command.ExecuteReaderAsync();
+ while (await reader.ReadAsync())
+ {
+ items.Add(new Models.ProcedureStatsComparisonItem
+ {
+ DatabaseName = reader.IsDBNull(0) ? "" : reader.GetString(0),
+ SchemaName = reader.IsDBNull(1) ? "" : reader.GetString(1),
+ ObjectName = reader.IsDBNull(2) ? "" : reader.GetString(2),
+ ExecutionCount = reader.IsDBNull(3) ? 0 : ToInt64(reader.GetValue(3)),
+ AvgDurationMs = reader.IsDBNull(4) ? 0 : ToDouble(reader.GetValue(4)),
+ AvgCpuMs = reader.IsDBNull(5) ? 0 : ToDouble(reader.GetValue(5)),
+ AvgReads = reader.IsDBNull(6) ? 0 : ToDouble(reader.GetValue(6)),
+ BaselineExecutionCount = reader.IsDBNull(7) ? 0 : ToInt64(reader.GetValue(7)),
+ BaselineAvgDurationMs = reader.IsDBNull(8) ? 0 : ToDouble(reader.GetValue(8)),
+ BaselineAvgCpuMs = reader.IsDBNull(9) ? 0 : ToDouble(reader.GetValue(9)),
+ BaselineAvgReads = reader.IsDBNull(10) ? 0 : ToDouble(reader.GetValue(10)),
+ });
+ }
+
+ return items;
+ }
+
 /// <summary>
 /// Gets query duration trend — total elapsed time per collection snapshot.
 /// </summary>
diff --git a/Lite/Services/LocalDataService.QueryStore.cs b/Lite/Services/LocalDataService.QueryStore.cs
index 6b5a8de..b19df4b 100644
--- a/Lite/Services/LocalDataService.QueryStore.cs
+++ b/Lite/Services/LocalDataService.QueryStore.cs
@@ -161,6 +161,122 @@ ORDER BY SUM(execution_count) * AVG(CAST(avg_duration_us AS DOUBLE)) DESC
return items;
}
+ /// <summary>
+ /// Gets query store comparison between a current time range and a baseline range.
+ /// Uses weighted averages (execution_count * avg_metric) for accurate aggregation.
+ /// </summary>
+ public async Task> GetQueryStoreComparisonAsync(
+ int serverId,
+ DateTime currentStart, DateTime currentEnd,
+ DateTime baselineStart, DateTime baselineEnd)
+ {
+ using var _q = TimeQuery("GetQueryStoreComparisonAsync", "v_query_store_stats comparison");
+ using var connection = await OpenConnectionAsync();
+ using var command = connection.CreateCommand();
+
+ command.CommandText = @"
+WITH top_current AS (
+ SELECT database_name, query_hash
+ FROM v_query_store_stats
+ WHERE server_id = $1
+ AND collection_time >= $2 AND collection_time <= $3
+ AND execution_count > 0
+ GROUP BY database_name, query_hash
+ ORDER BY SUM(execution_count) DESC
+ LIMIT 100
+),
+top_baseline AS (
+ SELECT database_name, query_hash
+ FROM v_query_store_stats
+ WHERE server_id = $1
+ AND collection_time >= $4 AND collection_time <= $5
+ AND execution_count > 0
+ GROUP BY database_name, query_hash
+ ORDER BY SUM(execution_count) DESC
+ LIMIT 100
+),
+top_hashes AS (
+ SELECT DISTINCT database_name, query_hash
+ FROM (
+ SELECT * FROM top_current
+ UNION ALL
+ SELECT * FROM top_baseline
+ ) combined
+),
+current_period AS (
+ SELECT th.database_name, th.query_hash,
+ SUM(qs.execution_count) AS exec_count,
+ SUM(qs.execution_count * qs.avg_duration_us::DOUBLE) / NULLIF(SUM(qs.execution_count), 0) / 1000.0 AS avg_duration_ms,
+ SUM(qs.execution_count * qs.avg_cpu_time_us::DOUBLE) / NULLIF(SUM(qs.execution_count), 0) / 1000.0 AS avg_cpu_ms,
+ SUM(qs.execution_count * qs.avg_logical_io_reads::DOUBLE) / NULLIF(SUM(qs.execution_count), 0) AS avg_reads,
+ MAX(qs.query_text) AS query_text
+ FROM top_hashes th
+ INNER JOIN v_query_store_stats qs
+ ON qs.query_hash IS NOT DISTINCT FROM th.query_hash
+ AND qs.database_name IS NOT DISTINCT FROM th.database_name
+ WHERE qs.server_id = $1
+ AND qs.collection_time >= $2 AND qs.collection_time <= $3
+ AND qs.execution_count > 0
+ GROUP BY th.database_name, th.query_hash
+),
+baseline_period AS (
+ SELECT th.database_name, th.query_hash,
+ SUM(qs.execution_count) AS exec_count,
+ SUM(qs.execution_count * qs.avg_duration_us::DOUBLE) / NULLIF(SUM(qs.execution_count), 0) / 1000.0 AS avg_duration_ms,
+ SUM(qs.execution_count * qs.avg_cpu_time_us::DOUBLE) / NULLIF(SUM(qs.execution_count), 0) / 1000.0 AS avg_cpu_ms,
+ SUM(qs.execution_count * qs.avg_logical_io_reads::DOUBLE) / NULLIF(SUM(qs.execution_count), 0) AS avg_reads,
+ MAX(qs.query_text) AS query_text
+ FROM top_hashes th
+ INNER JOIN v_query_store_stats qs
+ ON qs.query_hash IS NOT DISTINCT FROM th.query_hash
+ AND qs.database_name IS NOT DISTINCT FROM th.database_name
+ WHERE qs.server_id = $1
+ AND qs.collection_time >= $4 AND qs.collection_time <= $5
+ AND qs.execution_count > 0
+ GROUP BY th.database_name, th.query_hash
+)
+SELECT COALESCE(c.database_name, b.database_name) AS database_name,
+ COALESCE(c.query_hash, b.query_hash) AS query_hash,
+ COALESCE(c.query_text, b.query_text) AS query_text,
+ c.exec_count, c.avg_duration_ms, c.avg_cpu_ms, c.avg_reads,
+ b.exec_count AS baseline_exec_count,
+ b.avg_duration_ms AS baseline_avg_duration_ms,
+ b.avg_cpu_ms AS baseline_avg_cpu_ms,
+ b.avg_reads AS baseline_avg_reads
+FROM current_period c
+FULL OUTER JOIN baseline_period b
+ ON c.database_name IS NOT DISTINCT FROM b.database_name
+ AND c.query_hash IS NOT DISTINCT FROM b.query_hash;";
+
+ command.Parameters.Add(new DuckDBParameter { Value = serverId });
+ command.Parameters.Add(new DuckDBParameter { Value = currentStart });
+ command.Parameters.Add(new DuckDBParameter { Value = currentEnd });
+ command.Parameters.Add(new DuckDBParameter { Value = baselineStart });
+ command.Parameters.Add(new DuckDBParameter { Value = baselineEnd });
+
+ var items = new List();
+ using var reader = await command.ExecuteReaderAsync();
+ while (await reader.ReadAsync())
+ {
+ items.Add(new Models.QueryStatsComparisonItem
+ {
+ DatabaseName = reader.IsDBNull(0) ? "" : reader.GetString(0),
+ QueryHash = reader.IsDBNull(1) ? "" : reader.GetString(1),
+ QueryText = reader.IsDBNull(2) ? "" : reader.GetString(2),
+ ExecutionCount = reader.IsDBNull(3) ? 0 : ToInt64(reader.GetValue(3)),
+ AvgDurationMs = reader.IsDBNull(4) ? 0 : ToDouble(reader.GetValue(4)),
+ AvgCpuMs = reader.IsDBNull(5) ? 0 : ToDouble(reader.GetValue(5)),
+ AvgReads = reader.IsDBNull(6) ? 0 : ToDouble(reader.GetValue(6)),
+ BaselineExecutionCount = reader.IsDBNull(7) ? 0 : ToInt64(reader.GetValue(7)),
+ BaselineAvgDurationMs = reader.IsDBNull(8) ? 0 : ToDouble(reader.GetValue(8)),
+ BaselineAvgCpuMs = reader.IsDBNull(9) ? 0 : ToDouble(reader.GetValue(9)),
+ BaselineAvgReads = reader.IsDBNull(10) ? 0 : ToDouble(reader.GetValue(10)),
+ });
+ }
+
+ return items;
+ }
+
 /// <summary>
 /// Gets collection-level history for a specific Query Store query (for drilldown).
 /// </summary>
diff --git a/Lite/Services/LocalDataService.WaitStats.cs b/Lite/Services/LocalDataService.WaitStats.cs
index 7ebca42..6e83a2f 100644
--- a/Lite/Services/LocalDataService.WaitStats.cs
+++ b/Lite/Services/LocalDataService.WaitStats.cs
@@ -150,6 +150,55 @@ FROM raw
return items;
}
+ /// <summary>
+ /// Gets total wait time trend across all wait types as a single aggregated time-series.
+ /// Used by the correlated timeline lanes for a single-line wait stats overview.
+ /// </summary>
+ public async Task> GetTotalWaitTrendAsync(int serverId, int hoursBack = 24, DateTime? fromDate = null, DateTime? toDate = null)
+ {
+ using var _q = TimeQuery("GetTotalWaitTrendAsync", "wait_stats total trend");
+ using var connection = await OpenConnectionAsync();
+ using var command = connection.CreateCommand();
+
+ var (startTime, endTime) = GetTimeRange(hoursBack, fromDate, toDate);
+
+ command.CommandText = @"
+WITH per_collection AS
+(
+ SELECT
+ collection_time,
+ SUM(delta_wait_time_ms) AS total_delta_ms,
+ date_diff('second', LAG(collection_time) OVER (ORDER BY collection_time), collection_time) AS interval_seconds
+ FROM v_wait_stats
+ WHERE server_id = $1
+ AND collection_time >= $2
+ AND collection_time <= $3
+ GROUP BY collection_time
+)
+SELECT
+ collection_time,
+ CASE WHEN interval_seconds > 0 THEN CAST(total_delta_ms AS DOUBLE) / interval_seconds ELSE 0 END AS wait_time_ms_per_second
+FROM per_collection
+ORDER BY collection_time";
+
+ command.Parameters.Add(new DuckDBParameter { Value = serverId });
+ command.Parameters.Add(new DuckDBParameter { Value = startTime });
+ command.Parameters.Add(new DuckDBParameter { Value = endTime });
+
+ var items = new List();
+ using var reader = await command.ExecuteReaderAsync();
+ while (await reader.ReadAsync())
+ {
+ items.Add(new WaitStatsTrendPoint
+ {
+ CollectionTime = reader.GetDateTime(0),
+ WaitTimeMsPerSecond = reader.IsDBNull(1) ? 0 : reader.GetDouble(1)
+ });
+ }
+
+ return items;
+ }
+
///
/// Gets the latest poison wait deltas for alert checking.
/// Returns entries where delta_waiting_tasks > 0 with computed avg ms per wait.
@@ -418,6 +467,7 @@ public async Task> GetLongRunningQueriesAsync(
FROM v_query_snapshots AS r
WHERE r.server_id = $1
AND r.collection_time = (SELECT MAX(vqs.collection_time) FROM v_query_snapshots AS vqs WHERE vqs.server_id = $1)
+ AND r.collection_time >= NOW() - INTERVAL '10 MINUTES'
AND r.session_id > 50
{spServerDiagnosticsFilter}
{waitForFilter}
diff --git a/Lite/Services/PlanAnalyzer.cs b/Lite/Services/PlanAnalyzer.cs
index 227ef4b..0ead490 100644
--- a/Lite/Services/PlanAnalyzer.cs
+++ b/Lite/Services/PlanAnalyzer.cs
@@ -38,23 +38,78 @@ public static void Analyze(ParsedPlan plan)
private static void AnalyzeStatement(PlanStatement stmt)
{
// Rule 3: Serial plan with reason
- if (!string.IsNullOrEmpty(stmt.NonParallelPlanReason))
+ // Skip: trivial cost (< 0.01), TRIVIAL optimization (can't go parallel anyway),
+ // and 0ms actual elapsed time (not worth flagging).
+ if (!string.IsNullOrEmpty(stmt.NonParallelPlanReason)
+ && stmt.StatementSubTreeCost >= 0.01
+ && stmt.StatementOptmLevel != "TRIVIAL"
+ && !(stmt.QueryTimeStats != null && stmt.QueryTimeStats.ElapsedTimeMs == 0))
{
var reason = stmt.NonParallelPlanReason switch
{
+ // User/config forced serial
"MaxDOPSetToOne" => "MAXDOP is set to 1",
+ "QueryHintNoParallelSet" => "OPTION (MAXDOP 1) hint forces serial execution",
+ "ParallelismDisabledByTraceFlag" => "Parallelism disabled by trace flag",
+
+ // Passive — optimizer chose serial, nothing wrong
"EstimatedDOPIsOne" => "Estimated DOP is 1 (the plan's estimated cost was below the cost threshold for parallelism)",
+
+ // Edition/environment limitations
"NoParallelPlansInDesktopOrExpressEdition" => "Express/Desktop edition does not support parallelism",
+ "NoParallelCreateIndexInNonEnterpriseEdition" => "Parallel index creation requires Enterprise edition",
+ "NoParallelPlansDuringUpgrade" => "Parallel plans disabled during upgrade",
+ "NoParallelForPDWCompilation" => "Parallel plans not supported for PDW compilation",
+ "NoParallelForCloudDBReplication" => "Parallel plans not supported during cloud DB replication",
+
+ // Query constructs that block parallelism (actionable)
"CouldNotGenerateValidParallelPlan" => "Optimizer could not generate a valid parallel plan. Common causes: scalar UDFs, inserts into table variables, certain system functions, or OPTION (MAXDOP 1) hints",
- "QueryHintNoParallelSet" => "OPTION (MAXDOP 1) hint forces serial execution",
+ "TSQLUserDefinedFunctionsNotParallelizable" => "T-SQL scalar UDF prevents parallelism. Rewrite as an inline table-valued function, or on SQL Server 2019+ check if the UDF is eligible for automatic inlining",
+ "CLRUserDefinedFunctionRequiresDataAccess" => "CLR UDF with data access prevents parallelism",
+ "NonParallelizableIntrinsicFunction" => "Non-parallelizable intrinsic function in the query",
+ "TableVariableTransactionsDoNotSupportParallelNestedTransaction" => "Table variable transaction prevents parallelism. Consider using a #temp table instead",
+ "UpdatingWritebackVariable" => "Updating a writeback variable prevents parallelism",
+ "DMLQueryReturnsOutputToClient" => "DML with OUTPUT clause returning results to client prevents parallelism",
+ "MixedSerialAndParallelOnlineIndexBuildNotSupported" => "Mixed serial/parallel online index build not supported",
+ "NoRangesResumableCreate" => "Resumable index create cannot use parallelism for this operation",
+
+ // Cursor limitations
+ "NoParallelCursorFetchByBookmark" => "Cursor fetch by bookmark cannot use parallelism",
+ "NoParallelDynamicCursor" => "Dynamic cursors cannot use parallelism",
+ "NoParallelFastForwardCursor" => "Fast-forward cursors cannot use parallelism",
+
+ // Memory-optimized / natively compiled
+ "NoParallelForMemoryOptimizedTables" => "Memory-optimized tables do not support parallel plans",
+ "NoParallelForDmlOnMemoryOptimizedTable" => "DML on memory-optimized tables cannot use parallelism",
+ "NoParallelForNativelyCompiledModule" => "Natively compiled modules do not support parallelism",
+
+ // Remote queries
+ "NoParallelWithRemoteQuery" => "Remote queries cannot use parallelism",
+ "NoRemoteParallelismForMatrix" => "Remote parallelism not available for this query shape",
+
_ => stmt.NonParallelPlanReason
};
+ var isActionable = stmt.NonParallelPlanReason is
+ "MaxDOPSetToOne" or "QueryHintNoParallelSet" or "ParallelismDisabledByTraceFlag"
+ or "CouldNotGenerateValidParallelPlan"
+ or "TSQLUserDefinedFunctionsNotParallelizable"
+ or "CLRUserDefinedFunctionRequiresDataAccess"
+ or "NonParallelizableIntrinsicFunction"
+ or "TableVariableTransactionsDoNotSupportParallelNestedTransaction"
+ or "UpdatingWritebackVariable"
+ or "DMLQueryReturnsOutputToClient"
+ or "NoParallelCursorFetchByBookmark"
+ or "NoParallelDynamicCursor"
+ or "NoParallelFastForwardCursor"
+ or "NoParallelWithRemoteQuery"
+ or "NoRemoteParallelismForMatrix";
+
stmt.PlanWarnings.Add(new PlanWarning
{
WarningType = "Serial Plan",
Message = $"Query running serially: {reason}.",
- Severity = PlanWarningSeverity.Warning
+ Severity = isActionable ? PlanWarningSeverity.Warning : PlanWarningSeverity.Info
});
}
@@ -140,7 +195,7 @@ private static void AnalyzeStatement(PlanStatement stmt)
stmt.PlanWarnings.Add(new PlanWarning
{
WarningType = "UDF Execution",
- Message = $"Scalar UDF cost in this statement: {stmt.QueryUdfElapsedTimeMs:N0}ms elapsed, {stmt.QueryUdfCpuTimeMs:N0}ms CPU. Scalar UDFs run once per row and prevent parallelism. Rewrite as an inline table-valued function, or dump results to a #temp table and apply the UDF only to the final result set.",
+ Message = $"Scalar UDF cost in this statement: {stmt.QueryUdfElapsedTimeMs:N0}ms elapsed, {stmt.QueryUdfCpuTimeMs:N0}ms CPU. Scalar UDFs run once per row and prevent parallelism. Options: rewrite as an inline table-valued function, assign the result to a variable if only one row is needed, dump results to a #temp table and apply the UDF to the final result set, or on SQL Server 2019+ check if the UDF is eligible for automatic scalar UDF inlining.",
Severity = stmt.QueryUdfElapsedTimeMs >= 1000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
@@ -148,7 +203,8 @@ private static void AnalyzeStatement(PlanStatement stmt)
// Rule 20: Local variables without RECOMPILE
// Parameters with no CompiledValue are likely local variables — the optimizer
// cannot sniff their values and uses density-based ("unknown") estimates.
- if (stmt.Parameters.Count > 0)
+ // Skip trivial statements (simple variable assignments) where estimate quality doesn't matter.
+ if (stmt.Parameters.Count > 0 && stmt.StatementSubTreeCost >= 0.01)
{
var unsnifffedParams = stmt.Parameters
.Where(p => string.IsNullOrEmpty(p.CompiledValue))
@@ -352,21 +408,42 @@ private static void AnalyzeNode(PlanNode node, PlanStatement stmt)
{
// Rule 1: Filter operators — rows survived the tree just to be discarded
// Quantify the impact by summing child subtree cost (reads, CPU, time).
- if (node.PhysicalOp == "Filter" && !string.IsNullOrEmpty(node.Predicate))
+ // Suppress when the filter's child subtree is trivial (low I/O, fast, cheap).
+ if (node.PhysicalOp == "Filter" && !string.IsNullOrEmpty(node.Predicate)
+ && node.Children.Count > 0)
{
- var impact = QuantifyFilterImpact(node);
- var predicate = Truncate(node.Predicate, 200);
- var message = "Filter operator discarding rows late in the plan.";
- if (!string.IsNullOrEmpty(impact))
- message += $"\n{impact}";
- message += $"\nPredicate: {predicate}";
+ // Gate: skip trivial filters based on actual stats or estimated cost
+ bool isTrivial;
+ if (node.HasActualStats)
+ {
+ long childReads = 0;
+ foreach (var child in node.Children)
+ childReads += SumSubtreeReads(child);
+ var childElapsed = node.Children.Max(c => c.ActualElapsedMs);
+ isTrivial = childReads < 128 && childElapsed < 10;
+ }
+ else
+ {
+ var childCost = node.Children.Sum(c => c.EstimatedTotalSubtreeCost);
+ isTrivial = childCost < 1.0;
+ }
- node.Warnings.Add(new PlanWarning
+ if (!isTrivial)
{
- WarningType = "Filter Operator",
- Message = message,
- Severity = PlanWarningSeverity.Warning
- });
+ var impact = QuantifyFilterImpact(node);
+ var predicate = Truncate(node.Predicate, 200);
+ var message = "Filter operator discarding rows late in the plan.";
+ if (!string.IsNullOrEmpty(impact))
+ message += $"\n{impact}";
+ message += $"\nPredicate: {predicate}";
+
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Filter Operator",
+ Message = message,
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
}
// Rule 2: Eager Index Spools — optimizer building temporary indexes on the fly
@@ -391,7 +468,7 @@ private static void AnalyzeNode(PlanNode node, PlanStatement stmt)
node.Warnings.Add(new PlanWarning
{
WarningType = "UDF Execution",
- Message = $"Scalar UDF executing on this operator ({node.UdfElapsedTimeMs:N0}ms elapsed, {node.UdfCpuTimeMs:N0}ms CPU). Scalar UDFs run once per row and prevent parallelism. Rewrite as an inline table-valued function, or dump the query results to a #temp table first and apply the UDF only to the final result set.",
+ Message = $"Scalar UDF executing on this operator ({node.UdfElapsedTimeMs:N0}ms elapsed, {node.UdfCpuTimeMs:N0}ms CPU). Scalar UDFs run once per row and prevent parallelism. Options: rewrite as an inline table-valued function, assign the result to a variable if only one row is needed, dump results to a #temp table and apply the UDF to the final result set, or on SQL Server 2019+ check if the UDF is eligible for automatic scalar UDF inlining.",
Severity = node.UdfElapsedTimeMs >= 1000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
@@ -451,7 +528,7 @@ private static void AnalyzeNode(PlanNode node, PlanStatement stmt)
node.Warnings.Add(new PlanWarning
{
WarningType = "Scalar UDF",
- Message = $"Scalar {type} UDF: {udf.FunctionName}. Scalar UDFs run once per row and prevent parallelism. Rewrite as an inline table-valued function, or dump results to a #temp table and apply the UDF only to the final result set.",
+ Message = $"Scalar {type} UDF: {udf.FunctionName}. Scalar UDFs run once per row and prevent parallelism. Options: rewrite as an inline table-valued function, assign the result to a variable if only one row is needed, dump results to a #temp table and apply the UDF to the final result set, or on SQL Server 2019+ check if the UDF is eligible for automatic scalar UDF inlining.",
Severity = PlanWarningSeverity.Warning
});
}
@@ -829,12 +906,17 @@ _ when nonSargableReason.StartsWith("Function call", StringComparison.OrdinalIgn
node.EstimateRowsWithoutRowGoal > node.EstimateRows)
{
var reduction = node.EstimateRowsWithoutRowGoal / node.EstimateRows;
- node.Warnings.Add(new PlanWarning
+ // Require at least a 2x reduction to be worth mentioning — "1 to 1" or
+ // tiny floating-point differences that display identically are noise
+ if (reduction >= 2.0)
{
- WarningType = "Row Goal",
- Message = $"Row goal active: estimate reduced from {node.EstimateRowsWithoutRowGoal:N0} to {node.EstimateRows:N0} ({reduction:N0}x reduction) due to TOP, EXISTS, IN, or FAST hint. The optimizer chose this plan shape expecting to stop reading early. If the query reads all rows anyway, the plan choice may be suboptimal.",
- Severity = PlanWarningSeverity.Info
- });
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Row Goal",
+ Message = $"Row goal active: estimate reduced from {node.EstimateRowsWithoutRowGoal:N0} to {node.EstimateRows:N0} ({reduction:N0}x reduction) due to TOP, EXISTS, IN, or FAST hint. The optimizer chose this plan shape expecting to stop reading early. If the query reads all rows anyway, the plan choice may be suboptimal.",
+ Severity = PlanWarningSeverity.Info
+ });
+ }
}
// Rule 28: Row Count Spool — NOT IN with nullable column
@@ -1066,6 +1148,13 @@ private static bool IsOrExpansionChain(PlanNode concatenationNode)
if (parent == null || parent.PhysicalOp != "Nested Loops")
return false;
+ // If this Nested Loops is inside an Anti/Semi Join, this is a NOT IN/IN
+ // subquery pattern (Merge Interval optimizing range lookups), not an OR expansion
+ var nlParent = parent.Parent;
+ if (nlParent != null && nlParent.LogicalOp != null &&
+ nlParent.LogicalOp.Contains("Semi"))
+ return false;
+
return true;
}
diff --git a/Lite/Services/ShowPlanParser.cs b/Lite/Services/ShowPlanParser.cs
index b30fb0f..1e825e9 100644
--- a/Lite/Services/ShowPlanParser.cs
+++ b/Lite/Services/ShowPlanParser.cs
@@ -972,6 +972,22 @@ private static PlanNode ParseRelOp(XElement relOpEl)
if (actionColEl != null)
node.ActionColumn = FormatColumnRef(actionColEl);
+ // Nonclustered indexes maintained by modification operators (Update/SimpleUpdate/CreateIndex)
+ var opName = physicalOpEl.Name.LocalName;
+ if (opName is "Update" or "SimpleUpdate" or "CreateIndex")
+ {
+ var ncObjects = ScopedDescendants(physicalOpEl, Ns + "Object")
+ .Where(o => string.Equals(o.Attribute("IndexKind")?.Value, "NonClustered", StringComparison.OrdinalIgnoreCase))
+ .ToList();
+ node.NonClusteredIndexCount = ncObjects.Count;
+ foreach (var ncObj in ncObjects)
+ {
+ var ixName = ncObj.Attribute("Index")?.Value?.Replace("[", "").Replace("]", "");
+ if (!string.IsNullOrEmpty(ixName))
+ node.NonClusteredIndexNames.Add(ixName);
+ }
+ }
+
// SET predicate (UPDATE operator)
var setPredicateEl = physicalOpEl.Element(Ns + "SetPredicate");
if (setPredicateEl != null)
@@ -1616,7 +1632,8 @@ private static List ParseWarningsFromElement(XElement warningsEl)
});
}
- // Memory grant warning
+ // Memory grant warning (from plan XML) — gate at 1 GB to avoid noise on small grants
+ // All values are in KB, consistent with MemoryGrantInfo element
var memWarnEl = warningsEl.Element(Ns + "MemoryGrantWarning");
if (memWarnEl != null)
{
@@ -1624,12 +1641,17 @@ private static List ParseWarningsFromElement(XElement warningsEl)
var requested = ParseLong(memWarnEl.Attribute("RequestedMemory")?.Value);
var granted = ParseLong(memWarnEl.Attribute("GrantedMemory")?.Value);
var maxUsed = ParseLong(memWarnEl.Attribute("MaxUsedMemory")?.Value);
- result.Add(new PlanWarning
+ if (granted >= 1048576) // 1 GB in KB
{
- WarningType = "Memory Grant",
- Message = $"{kind}: Requested {requested:N0} KB, Granted {granted:N0} KB, Used {maxUsed:N0} KB",
- Severity = PlanWarningSeverity.Warning
- });
+ var grantedMB = granted / 1024.0;
+ var usedMB = maxUsed / 1024.0;
+ result.Add(new PlanWarning
+ {
+ WarningType = "Memory Grant",
+ Message = $"{kind}: Granted {grantedMB:N0} MB, Used {usedMB:N0} MB",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
}
// Implicit conversions
diff --git a/Lite/Themes/CoolBreezeTheme.xaml b/Lite/Themes/CoolBreezeTheme.xaml
index 6bd705d..739f20b 100644
--- a/Lite/Themes/CoolBreezeTheme.xaml
+++ b/Lite/Themes/CoolBreezeTheme.xaml
@@ -22,7 +22,7 @@
#1A2A3A
- #364D61
+ #1A2A3A
#5B7A90
diff --git a/Lite/Themes/DarkTheme.xaml b/Lite/Themes/DarkTheme.xaml
index 014635c..db5aac5 100644
--- a/Lite/Themes/DarkTheme.xaml
+++ b/Lite/Themes/DarkTheme.xaml
@@ -22,7 +22,7 @@
#E4E6EB
- #9DA5B4
+ #E4E6EB
#6B7280
@@ -1213,7 +1213,7 @@
-
+
diff --git a/Lite/Themes/LightTheme.xaml b/Lite/Themes/LightTheme.xaml
index dc2dab7..063b352 100644
--- a/Lite/Themes/LightTheme.xaml
+++ b/Lite/Themes/LightTheme.xaml
@@ -22,7 +22,7 @@
#1A1D23
- #4A5568
+ #1A1D23
#718096
diff --git a/Lite/Windows/ProcedureHistoryWindow.xaml.cs b/Lite/Windows/ProcedureHistoryWindow.xaml.cs
index 4f94966..4ecbd89 100644
--- a/Lite/Windows/ProcedureHistoryWindow.xaml.cs
+++ b/Lite/Windows/ProcedureHistoryWindow.xaml.cs
@@ -134,14 +134,14 @@ private static void ApplyTheme(ScottPlot.WPF.WpfPlot chart)
{
figureBackground = ScottPlot.Color.FromHex("#FFFFFF");
dataBackground = ScottPlot.Color.FromHex("#F5F7FA");
- textColor = ScottPlot.Color.FromHex("#4A5568");
+ textColor = ScottPlot.Color.FromHex("#1A1D23");
gridColor = ScottPlot.Colors.Black.WithAlpha(20);
}
else
{
figureBackground = ScottPlot.Color.FromHex("#22252b");
dataBackground = ScottPlot.Color.FromHex("#111217");
- textColor = ScottPlot.Color.FromHex("#9DA5B4");
+ textColor = ScottPlot.Color.FromHex("#E4E6EB");
gridColor = ScottPlot.Colors.White.WithAlpha(40);
}
chart.Plot.FigureBackground.Color = figureBackground;
diff --git a/Lite/Windows/QueryStatsHistoryWindow.xaml.cs b/Lite/Windows/QueryStatsHistoryWindow.xaml.cs
index c78c005..97bce98 100644
--- a/Lite/Windows/QueryStatsHistoryWindow.xaml.cs
+++ b/Lite/Windows/QueryStatsHistoryWindow.xaml.cs
@@ -191,14 +191,14 @@ private static void ApplyTheme(ScottPlot.WPF.WpfPlot chart)
{
figureBackground = ScottPlot.Color.FromHex("#FFFFFF");
dataBackground = ScottPlot.Color.FromHex("#F5F7FA");
- textColor = ScottPlot.Color.FromHex("#4A5568");
+ textColor = ScottPlot.Color.FromHex("#1A1D23");
gridColor = ScottPlot.Colors.Black.WithAlpha(20);
}
else
{
figureBackground = ScottPlot.Color.FromHex("#22252b");
dataBackground = ScottPlot.Color.FromHex("#111217");
- textColor = ScottPlot.Color.FromHex("#9DA5B4");
+ textColor = ScottPlot.Color.FromHex("#E4E6EB");
gridColor = ScottPlot.Colors.White.WithAlpha(40);
}
chart.Plot.FigureBackground.Color = figureBackground;
diff --git a/Lite/Windows/QueryStoreHistoryWindow.xaml.cs b/Lite/Windows/QueryStoreHistoryWindow.xaml.cs
index d3ccf71..00c1fbb 100644
--- a/Lite/Windows/QueryStoreHistoryWindow.xaml.cs
+++ b/Lite/Windows/QueryStoreHistoryWindow.xaml.cs
@@ -172,14 +172,14 @@ private static void ApplyTheme(ScottPlot.WPF.WpfPlot chart)
{
figureBackground = ScottPlot.Color.FromHex("#FFFFFF");
dataBackground = ScottPlot.Color.FromHex("#F5F7FA");
- textColor = ScottPlot.Color.FromHex("#4A5568");
+ textColor = ScottPlot.Color.FromHex("#1A1D23");
gridColor = ScottPlot.Colors.Black.WithAlpha(20);
}
else
{
figureBackground = ScottPlot.Color.FromHex("#22252b");
dataBackground = ScottPlot.Color.FromHex("#111217");
- textColor = ScottPlot.Color.FromHex("#9DA5B4");
+ textColor = ScottPlot.Color.FromHex("#E4E6EB");
gridColor = ScottPlot.Colors.White.WithAlpha(40);
}
chart.Plot.FigureBackground.Color = figureBackground;
diff --git a/README.md b/README.md
index bdfd3b2..9390c01 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,21 @@
# SQL Server Performance Monitor
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
**Free, open-source monitoring that replaces the tools charging you thousands per server per year.** 30+ collectors, real-time alerts, built-in MCP server for AI analysis. Nothing phones home. Your data stays on your server and your machine.
**Supported:** SQL Server 2016–2025 | Azure SQL Managed Instance | AWS RDS for SQL Server | Azure SQL Database (Lite only)
@@ -677,6 +693,17 @@ See [THIRD_PARTY_NOTICES.md](THIRD_PARTY_NOTICES.md) for complete license texts.
---
+## Sponsors
+
+
+
+---
+
## License
Copyright (c) 2026 Darling Data, LLC. Licensed under the MIT License. See [LICENSE](LICENSE) for details.
diff --git a/docs/signpath_logo.svg b/docs/signpath_logo.svg
new file mode 100644
index 0000000..7b45355
--- /dev/null
+++ b/docs/signpath_logo.svg
@@ -0,0 +1,21 @@
+
+
+ SignPath GmbH
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file