diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index a235f907..a2e88c68 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -6,7 +6,7 @@ on:
pull_request:
branches: [main, dev]
release:
- types: [created, published]
+ types: [published]
permissions:
contents: write
diff --git a/.gitignore b/.gitignore
index e5507e8d..5af2ea72 100644
--- a/.gitignore
+++ b/.gitignore
@@ -59,3 +59,6 @@ Lite/collection_schedule.json
# Plans directory
plans/
+
+# Community scripts (user-provided, not bundled)
+community/*.sql
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e0ebd159..eee63d09 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,48 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [2.7.0] - 2026-04-13
+
+### Added
+
+- **Host OS column** in Server Inventory for both Dashboard and Lite ([#748], [#823])
+- **Offline community script support** via `community/` directory for user-contributed scripts ([#814], [#822])
+- **MultiSubnetFailover connection option** in Dashboard and Lite for Always On availability groups ([#813], [#821])
+
+### Changed
+
+- **PlanAnalyzer and ShowPlanParser** synced from PerformanceStudio with latest improvements ([#816])
+- **MCP query tools** optimized for large databases ([#826])
+- **Add Server dialog UX** improved with inline connection status and full-height window
+- **"CPUs" renamed to "Logical CPUs"** for clarity in Lite ([#825])
+
+### Fixed
+
+- **Dashboard auto-refresh stalling under load** — replaced DispatcherTimer with async Task.Delay loop to prevent priority starvation during heavy chart rendering ([#833], [#834])
+- **Lite auto-refresh silently skipping** every tick ([#824])
+- **Deadlock count not resetting** between collections ([#803], [#820])
+- **Upgrade filter skipping patch versions** during version comparison ([#817], [#819])
+- **Upgrade script executing against master** instead of PerformanceMonitor database ([#828])
+- **Duplicate release builds** triggering on both created and published events
+
+[#748]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/748
+[#803]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/803
+[#813]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/813
+[#814]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/814
+[#816]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/816
+[#817]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/817
+[#819]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/819
+[#820]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/820
+[#821]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/821
+[#822]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/822
+[#823]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/823
+[#824]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/824
+[#825]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/825
+[#826]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/826
+[#828]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/828
+[#833]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/833
+[#834]: https://github.com/erikdarlingdata/PerformanceMonitor/issues/834
+
## [2.6.0] - 2026-04-08
### Added
diff --git a/Dashboard/AddServerDialog.xaml b/Dashboard/AddServerDialog.xaml
index a4c788f1..22e64dd0 100644
--- a/Dashboard/AddServerDialog.xaml
+++ b/Dashboard/AddServerDialog.xaml
@@ -2,8 +2,8 @@
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
Title="Add SQL Server"
- Height="750" Width="500"
- WindowStartupLocation="CenterOwner"
+ Width="500"
+ WindowStartupLocation="Manual"
ResizeMode="CanResizeWithGrip"
Background="{DynamicResource BackgroundBrush}"
Foreground="{DynamicResource ForegroundBrush}">
@@ -111,6 +111,12 @@
Foreground="{DynamicResource ForegroundBrush}" Margin="0,0,0,6"
ToolTip="Sets ApplicationIntent=ReadOnly. Required when connecting via an AG listener or Azure failover group endpoint to route to a readable secondary."/>
+
+
+
@@ -136,6 +142,9 @@
Background="{DynamicResource BackgroundLightBrush}"
CornerRadius="4" Padding="12" Margin="0,0,0,10">
+
diff --git a/Dashboard/AddServerDialog.xaml.cs b/Dashboard/AddServerDialog.xaml.cs
index 7595d755..dfc55e74 100644
--- a/Dashboard/AddServerDialog.xaml.cs
+++ b/Dashboard/AddServerDialog.xaml.cs
@@ -47,10 +47,12 @@ private enum DialogState
private InstallationResult? _installResult;
private string? _reportPath;
private DialogState _currentState = DialogState.Initial;
+ private string? _serverVersion;
public AddServerDialog()
{
InitializeComponent();
+ SizeToWorkArea();
_isEditMode = false;
ServerConnection = new ServerConnection();
Title = "Add SQL Server";
@@ -59,6 +61,7 @@ public AddServerDialog()
public AddServerDialog(ServerConnection existingServer)
{
InitializeComponent();
+ SizeToWorkArea();
_isEditMode = true;
ServerConnection = existingServer;
Title = "Edit SQL Server";
@@ -78,6 +81,7 @@ public AddServerDialog(ServerConnection existingServer)
};
TrustServerCertificateCheckBox.IsChecked = existingServer.TrustServerCertificate;
ReadOnlyIntentCheckBox.IsChecked = existingServer.ReadOnlyIntent;
+ MultiSubnetFailoverCheckBox.IsChecked = existingServer.MultiSubnetFailover;
if (existingServer.AuthenticationType == AuthenticationTypes.EntraMFA)
{
@@ -108,6 +112,14 @@ public AddServerDialog(ServerConnection existingServer)
}
}
+ private void SizeToWorkArea()
+ {
+ var workArea = SystemParameters.WorkArea;
+ Height = workArea.Height;
+ Top = workArea.Top;
+ Left = workArea.Left + (workArea.Width - Width) / 2;
+ }
+
private void AuthType_Changed(object sender, RoutedEventArgs e)
{
if (SqlAuthPanel != null && EntraMfaPanel != null)
@@ -154,7 +166,8 @@ private SqlConnectionStringBuilder BuildConnectionBuilder()
Encrypt = ParseEncryptOption(GetSelectedEncryptMode()),
ApplicationIntent = ReadOnlyIntentCheckBox.IsChecked == true
? ApplicationIntent.ReadOnly
- : ApplicationIntent.ReadWrite
+ : ApplicationIntent.ReadWrite,
+ MultiSubnetFailover = MultiSubnetFailoverCheckBox.IsChecked == true
};
if (WindowsAuthRadio.IsChecked == true)
@@ -339,17 +352,9 @@ private async void TestConnection_Click(object sender, RoutedEventArgs e)
if (connected)
{
- var message = serverVersion != null
- ? $"Successfully connected to {ServerNameTextBox.Text}!\n\n{serverVersion}"
- : $"Successfully connected to {ServerNameTextBox.Text}!";
- MessageBox.Show(
- message,
- "Connection Successful",
- MessageBoxButton.OK,
- MessageBoxImage.Information
- );
+ _serverVersion = serverVersion;
- /* After successful connection, check database status */
+ /* Show connection + database status inline instead of a popup */
await DetectDatabaseStatusAsync();
}
else if (mfaCancelled)
@@ -400,6 +405,10 @@ private async System.Threading.Tasks.Task DetectDatabaseStatusAsync()
if (!_coreServerInfo.IsSupportedVersion)
{
+ string serverName = ServerNameTextBox.Text;
+ ConnectionInfoText.Text = _serverVersion != null
+ ? $"Connected to {serverName} ({_serverVersion})"
+ : $"Connected to {serverName}";
DatabaseStatusText.Text = $"Warning: {_coreServerInfo.ProductMajorVersionName} is not supported. SQL Server 2016+ is required.";
DatabaseStatusPanel.Visibility = Visibility.Visible;
InstallUpgradeButton.Visibility = Visibility.Collapsed;
@@ -458,13 +467,22 @@ private void TransitionToState(DialogState newState)
ViewReportButton.Visibility = Visibility.Collapsed;
StatusText.Text = string.Empty;
StatusText.Visibility = Visibility.Collapsed;
+ ConnectionInfoText.Text = string.Empty;
InstallUpgradeButton.Visibility = Visibility.Visible;
SkipInstallText.Visibility = Visibility.Visible;
+ /* Build the connection header shown for all connected states */
+ string serverName = ServerNameTextBox.Text;
+ string connectionHeader = _serverVersion != null
+ ? $"Connected to {serverName} ({_serverVersion})"
+ : $"Connected to {serverName}";
+
switch (newState)
{
case DialogState.Connected_NoDatabase:
- DatabaseStatusText.Text = $"No PerformanceMonitor database found on this server. Install v{appVersion}?";
+ ConnectionInfoText.Text = connectionHeader;
+ DatabaseStatusText.Text = "No PerformanceMonitor database found on this server. " +
+ $"Click Install Now to create the monitoring database, collection jobs, and stored procedures (v{appVersion}).";
InstallUpgradeButton.Content = "Install Now";
DatabaseStatusPanel.Visibility = Visibility.Visible;
InstallationPanel.Visibility = Visibility.Visible;
@@ -473,7 +491,9 @@ private void TransitionToState(DialogState newState)
case DialogState.Connected_NeedsUpgrade:
string normalizedInstalled = NormalizeVersion(_installedVersion!);
- DatabaseStatusText.Text = $"v{normalizedInstalled} installed — v{appVersion} available";
+ ConnectionInfoText.Text = connectionHeader;
+ DatabaseStatusText.Text = $"PerformanceMonitor v{normalizedInstalled} is installed. " +
+ $"v{appVersion} is available — click Upgrade Now to apply the update.";
InstallUpgradeButton.Content = "Upgrade Now";
DatabaseStatusPanel.Visibility = Visibility.Visible;
InstallationPanel.Visibility = Visibility.Visible;
@@ -482,6 +502,7 @@ private void TransitionToState(DialogState newState)
case DialogState.Connected_Current:
string normalizedCurrent = NormalizeVersion(_installedVersion!);
+ ConnectionInfoText.Text = connectionHeader;
DatabaseStatusText.Text = $"PerformanceMonitor v{normalizedCurrent} is up to date.";
InstallUpgradeButton.Visibility = Visibility.Collapsed;
SkipInstallText.Visibility = Visibility.Collapsed;
@@ -616,7 +637,8 @@ private async void InstallOrUpgrade_Click(object sender, RoutedEventArgs e)
preValidationAction = async () =>
{
AppendInstallLog("Installing community dependencies...", "Info");
- using var depInstaller = new DependencyInstaller();
+ string communityDir = System.IO.Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "community");
+ using var depInstaller = new DependencyInstaller(communityDir);
await depInstaller.InstallDependenciesAsync(installerConnStr, progress, cancellationToken);
};
}
@@ -821,6 +843,7 @@ private void SetFormEnabled(bool enabled)
EncryptModeComboBox.IsEnabled = enabled;
TrustServerCertificateCheckBox.IsEnabled = enabled;
ReadOnlyIntentCheckBox.IsEnabled = enabled;
+ MultiSubnetFailoverCheckBox.IsEnabled = enabled;
IsFavoriteCheckBox.IsEnabled = enabled;
MonthlyCostTextBox.IsEnabled = enabled;
DescriptionTextBox.IsEnabled = enabled;
@@ -915,6 +938,7 @@ private async void Save_Click(object sender, RoutedEventArgs e)
ServerConnection.EncryptMode = GetSelectedEncryptMode();
ServerConnection.TrustServerCertificate = TrustServerCertificateCheckBox.IsChecked == true;
ServerConnection.ReadOnlyIntent = ReadOnlyIntentCheckBox.IsChecked == true;
+ ServerConnection.MultiSubnetFailover = MultiSubnetFailoverCheckBox.IsChecked == true;
if (decimal.TryParse(MonthlyCostTextBox.Text, System.Globalization.NumberStyles.Any, System.Globalization.CultureInfo.InvariantCulture, out var editCost) && editCost >= 0)
ServerConnection.MonthlyCostUsd = editCost;
}
@@ -936,6 +960,7 @@ private async void Save_Click(object sender, RoutedEventArgs e)
EncryptMode = GetSelectedEncryptMode(),
TrustServerCertificate = TrustServerCertificateCheckBox.IsChecked == true,
ReadOnlyIntent = ReadOnlyIntentCheckBox.IsChecked == true,
+ MultiSubnetFailover = MultiSubnetFailoverCheckBox.IsChecked == true,
MonthlyCostUsd = monthlyCost
};
}
diff --git a/Dashboard/Controls/CorrelatedTimelineLanesControl.xaml.cs b/Dashboard/Controls/CorrelatedTimelineLanesControl.xaml.cs
index 9af24f3f..8fe1e734 100644
--- a/Dashboard/Controls/CorrelatedTimelineLanesControl.xaml.cs
+++ b/Dashboard/Controls/CorrelatedTimelineLanesControl.xaml.cs
@@ -27,7 +27,6 @@ public partial class CorrelatedTimelineLanesControl : UserControl
private DatabaseService? _dataService;
private SqlServerBaselineProvider? _baselineProvider;
private CorrelatedCrosshairManager? _crosshairManager;
- private bool _isRefreshing;
public CorrelatedTimelineLanesControl()
{
@@ -66,176 +65,168 @@ public void Initialize(DatabaseService dataService, SqlServerBaselineProvider? b
public async Task RefreshAsync(int hoursBack, DateTime? fromDate, DateTime? toDate,
(DateTime From, DateTime To)? comparisonRange = null)
{
- if (_dataService == null || _isRefreshing) return;
- _isRefreshing = true;
+ if (_dataService == null) return;
+
+ _crosshairManager?.PrepareForRefresh();
+
+ var cpuTask = _dataService.GetCpuUtilizationAsync(hoursBack, fromDate, toDate);
+ var waitTask = _dataService.GetTotalWaitStatsTrendAsync(hoursBack, fromDate, toDate);
+ var blockingTask = _dataService.GetBlockedSessionTrendAsync(hoursBack, fromDate, toDate);
+ var deadlockTask = _dataService.GetDeadlockTrendAsync(hoursBack, fromDate, toDate);
+ var memoryTask = _dataService.GetMemoryStatsAsync(hoursBack, fromDate, toDate);
+ var fileIoTask = _dataService.GetFileIoLatencyTimeSeriesAsync(false, hoursBack, fromDate, toDate);
+
+ // Fetch baselines for band rendering if provider is available
+ var referenceTime = fromDate ?? DateTime.UtcNow.AddHours(-hoursBack);
+ Task? cpuBaselineTask = null;
+ Task? waitBaselineTask = null;
+ Task? ioBaselineTask = null;
+ Task? blockingBaselineTask = null;
+ Task? deadlockBaselineTask = null;
+
+ if (_baselineProvider != null)
+ {
+ cpuBaselineTask = GetBaselineAsync(SqlServerMetricNames.Cpu, referenceTime);
+ waitBaselineTask = GetBaselineAsync(SqlServerMetricNames.WaitStats, referenceTime);
+ ioBaselineTask = GetBaselineAsync(SqlServerMetricNames.IoLatency, referenceTime);
+ blockingBaselineTask = GetBaselineAsync(SqlServerMetricNames.Blocking, referenceTime);
+ deadlockBaselineTask = GetBaselineAsync(SqlServerMetricNames.Deadlock, referenceTime);
+ }
try
{
- _crosshairManager?.PrepareForRefresh();
-
- var cpuTask = _dataService.GetCpuUtilizationAsync(hoursBack, fromDate, toDate);
- var waitTask = _dataService.GetTotalWaitStatsTrendAsync(hoursBack, fromDate, toDate);
- var blockingTask = _dataService.GetBlockedSessionTrendAsync(hoursBack, fromDate, toDate);
- var deadlockTask = _dataService.GetDeadlockTrendAsync(hoursBack, fromDate, toDate);
- var memoryTask = _dataService.GetMemoryStatsAsync(hoursBack, fromDate, toDate);
- var fileIoTask = _dataService.GetFileIoLatencyTimeSeriesAsync(false, hoursBack, fromDate, toDate);
-
- // Fetch baselines for band rendering if provider is available
- var referenceTime = fromDate ?? DateTime.UtcNow.AddHours(-hoursBack);
- Task? cpuBaselineTask = null;
- Task? waitBaselineTask = null;
- Task? ioBaselineTask = null;
- Task? blockingBaselineTask = null;
- Task? deadlockBaselineTask = null;
-
- if (_baselineProvider != null)
- {
- cpuBaselineTask = GetBaselineAsync(SqlServerMetricNames.Cpu, referenceTime);
- waitBaselineTask = GetBaselineAsync(SqlServerMetricNames.WaitStats, referenceTime);
- ioBaselineTask = GetBaselineAsync(SqlServerMetricNames.IoLatency, referenceTime);
- blockingBaselineTask = GetBaselineAsync(SqlServerMetricNames.Blocking, referenceTime);
- deadlockBaselineTask = GetBaselineAsync(SqlServerMetricNames.Deadlock, referenceTime);
- }
+ var tasks = new List<Task> { cpuTask, waitTask, blockingTask, deadlockTask, memoryTask, fileIoTask };
+ if (cpuBaselineTask != null) tasks.Add(cpuBaselineTask);
+ if (waitBaselineTask != null) tasks.Add(waitBaselineTask);
+ if (ioBaselineTask != null) tasks.Add(ioBaselineTask);
+ if (blockingBaselineTask != null) tasks.Add(blockingBaselineTask);
+ if (deadlockBaselineTask != null) tasks.Add(deadlockBaselineTask);
+ await Task.WhenAll(tasks);
+ }
+ catch (Exception ex)
+ {
+ Debug.WriteLine($"CorrelatedLanes: Data fetch failed: {ex.Message}");
+ }
- try
- {
- var tasks = new List<Task> { cpuTask, waitTask, blockingTask, deadlockTask, memoryTask, fileIoTask };
- if (cpuBaselineTask != null) tasks.Add(cpuBaselineTask);
- if (waitBaselineTask != null) tasks.Add(waitBaselineTask);
- if (ioBaselineTask != null) tasks.Add(ioBaselineTask);
- if (blockingBaselineTask != null) tasks.Add(blockingBaselineTask);
- if (deadlockBaselineTask != null) tasks.Add(deadlockBaselineTask);
- await Task.WhenAll(tasks);
- }
- catch (Exception ex)
- {
- Debug.WriteLine($"CorrelatedLanes: Data fetch failed: {ex.Message}");
- }
+ var cpuBaseline = cpuBaselineTask is { IsCompletedSuccessfully: true } ? cpuBaselineTask.Result : null;
+ var waitBaseline = waitBaselineTask is { IsCompletedSuccessfully: true } ? waitBaselineTask.Result : null;
+ var ioBaseline = ioBaselineTask is { IsCompletedSuccessfully: true } ? ioBaselineTask.Result : null;
+ var blockingBaseline = blockingBaselineTask is { IsCompletedSuccessfully: true } ? blockingBaselineTask.Result : null;
+ var deadlockBaseline = deadlockBaselineTask is { IsCompletedSuccessfully: true } ? deadlockBaselineTask.Result : null;
+ var blockingLaneBaseline = blockingBaseline ?? deadlockBaseline;
+
+ // minAnomalyValue: absolute floor below which dots/arrows are suppressed even if outside band.
+ // Prevents "1% CPU above 0.5% baseline" false alarms on idle servers.
+ if (cpuTask.IsCompletedSuccessfully)
+ UpdateLane(CpuChart, "CPU %",
+ cpuTask.Result.OrderBy(d => d.SampleTime)
+ .Select(d => (d.SampleTime.ToOADate(), (double)d.SqlServerCpuUtilization)).ToList(),
+ "#4FC3F7", 0, 105, cpuBaseline, minAnomalyValue: 10);
+ else
+ ShowEmpty(CpuChart, "CPU %");
- var cpuBaseline = cpuBaselineTask is { IsCompletedSuccessfully: true } ? cpuBaselineTask.Result : null;
- var waitBaseline = waitBaselineTask is { IsCompletedSuccessfully: true } ? waitBaselineTask.Result : null;
- var ioBaseline = ioBaselineTask is { IsCompletedSuccessfully: true } ? ioBaselineTask.Result : null;
- var blockingBaseline = blockingBaselineTask is { IsCompletedSuccessfully: true } ? blockingBaselineTask.Result : null;
- var deadlockBaseline = deadlockBaselineTask is { IsCompletedSuccessfully: true } ? deadlockBaselineTask.Result : null;
- var blockingLaneBaseline = blockingBaseline ?? deadlockBaseline;
-
- // minAnomalyValue: absolute floor below which dots/arrows are suppressed even if outside band.
- // Prevents "1% CPU above 0.5% baseline" false alarms on idle servers.
- if (cpuTask.IsCompletedSuccessfully)
- UpdateLane(CpuChart, "CPU %",
- cpuTask.Result.OrderBy(d => d.SampleTime)
- .Select(d => (d.SampleTime.ToOADate(), (double)d.SqlServerCpuUtilization)).ToList(),
- "#4FC3F7", 0, 105, cpuBaseline, minAnomalyValue: 10);
- else
- ShowEmpty(CpuChart, "CPU %");
-
- if (waitTask.IsCompletedSuccessfully)
- UpdateLane(WaitStatsChart, "Wait ms/sec",
- waitTask.Result.Select(d => (d.CollectionTime.ToOADate(), (double)d.WaitTimeMsPerSecond)).ToList(),
- "#FFB74D", baseline: waitBaseline, minAnomalyValue: 100);
- else
- ShowEmpty(WaitStatsChart, "Wait ms/sec");
-
- try
- {
- var blockingData = blockingTask.IsCompletedSuccessfully
- ? blockingTask.Result
- .GroupBy(d => d.CollectionTime)
- .OrderBy(g => g.Key)
- .Select(g => (g.Key.ToOADate(), (double)g.Sum(x => x.BlockedCount)))
- .ToList()
- : new List<(double, double)>();
- var deadlockData = deadlockTask.IsCompletedSuccessfully
- ? deadlockTask.Result
- .Select(d => (d.CollectionTime.ToOADate(), (double)d.BlockedCount))
- .ToList()
- : new List<(double, double)>();
- UpdateBlockingLane(blockingData, deadlockData, blockingLaneBaseline);
- }
- catch (Exception ex)
- {
- Debug.WriteLine($"CorrelatedLanes: Blocking lane failed: {ex}");
- ShowEmpty(BlockingChart, "Blocking & Deadlocking");
- }
+ if (waitTask.IsCompletedSuccessfully)
+ UpdateLane(WaitStatsChart, "Wait ms/sec",
+ waitTask.Result.Select(d => (d.CollectionTime.ToOADate(), (double)d.WaitTimeMsPerSecond)).ToList(),
+ "#FFB74D", baseline: waitBaseline, minAnomalyValue: 100);
+ else
+ ShowEmpty(WaitStatsChart, "Wait ms/sec");
- if (memoryTask.IsCompletedSuccessfully)
- UpdateLane(MemoryChart, "Buffer Pool MB",
- memoryTask.Result.Select(d => (d.CollectionTime.ToOADate(), (double)d.TotalMemoryMb)).ToList(),
- "#CE93D8");
- else
- ShowEmpty(MemoryChart, "Buffer Pool MB");
+ try
+ {
+ var blockingData = blockingTask.IsCompletedSuccessfully
+ ? blockingTask.Result
+ .GroupBy(d => d.CollectionTime)
+ .OrderBy(g => g.Key)
+ .Select(g => (g.Key.ToOADate(), (double)g.Sum(x => x.BlockedCount)))
+ .ToList()
+ : new List<(double, double)>();
+ var deadlockData = deadlockTask.IsCompletedSuccessfully
+ ? deadlockTask.Result
+ .Select(d => (d.CollectionTime.ToOADate(), (double)d.BlockedCount))
+ .ToList()
+ : new List<(double, double)>();
+ UpdateBlockingLane(blockingData, deadlockData, blockingLaneBaseline);
+ }
+ catch (Exception ex)
+ {
+ Debug.WriteLine($"CorrelatedLanes: Blocking lane failed: {ex}");
+ ShowEmpty(BlockingChart, "Blocking & Deadlocking");
+ }
+
+ if (memoryTask.IsCompletedSuccessfully)
+ UpdateLane(MemoryChart, "Buffer Pool MB",
+ memoryTask.Result.Select(d => (d.CollectionTime.ToOADate(), (double)d.TotalMemoryMb)).ToList(),
+ "#CE93D8");
+ else
+ ShowEmpty(MemoryChart, "Buffer Pool MB");
+
+ if (fileIoTask.IsCompletedSuccessfully)
+ {
+ var ioGrouped = fileIoTask.Result
+ .GroupBy(d => d.CollectionTime)
+ .OrderBy(g => g.Key)
+ .Select(g => (g.Key.ToOADate(), (double)g.Average(x => x.ReadLatencyMs)))
+ .ToList();
+ UpdateLane(FileIoChart, "I/O ms", ioGrouped, "#81C784", baseline: ioBaseline, minAnomalyValue: 2);
+ }
+ else
+ ShowEmpty(FileIoChart, "I/O ms");
+
+ // Comparison overlay — fetch reference period data and render as ghost lines
+ if (comparisonRange.HasValue)
+ {
+ var refFrom = comparisonRange.Value.From;
+ var refTo = comparisonRange.Value.To;
+ var timeShift = (fromDate ?? DateTime.UtcNow.AddHours(-hoursBack)) - refFrom;
+
+ var refCpuTask = _dataService.GetCpuUtilizationAsync(0, refFrom, refTo);
+ var refWaitTask = _dataService.GetTotalWaitStatsTrendAsync(0, refFrom, refTo);
+ var refBlockingTask = _dataService.GetBlockedSessionTrendAsync(0, refFrom, refTo);
+ var refMemoryTask = _dataService.GetMemoryStatsAsync(0, refFrom, refTo);
+ var refIoTask = _dataService.GetFileIoLatencyTimeSeriesAsync(false, 0, refFrom, refTo);
- if (fileIoTask.IsCompletedSuccessfully)
+ try { await Task.WhenAll(refCpuTask, refWaitTask, refBlockingTask, refMemoryTask, refIoTask); }
+ catch (Exception ex) { Debug.WriteLine($"CorrelatedLanes: Comparison fetch failed: {ex.Message}"); }
+
+ if (refCpuTask.IsCompletedSuccessfully)
+ AddGhostLine(CpuChart, refCpuTask.Result
+ .Select(d => (d.SampleTime.Add(timeShift).ToOADate(), (double)d.SqlServerCpuUtilization)).ToList(), "#4FC3F7");
+
+ if (refWaitTask.IsCompletedSuccessfully)
+ AddGhostLine(WaitStatsChart, refWaitTask.Result
+ .Select(d => (d.CollectionTime.Add(timeShift).ToOADate(), (double)d.WaitTimeMsPerSecond)).ToList(), "#FFB74D");
+
+ if (refBlockingTask.IsCompletedSuccessfully)
{
- var ioGrouped = fileIoTask.Result
+ var refBlocking = refBlockingTask.Result
.GroupBy(d => d.CollectionTime)
.OrderBy(g => g.Key)
- .Select(g => (g.Key.ToOADate(), (double)g.Average(x => x.ReadLatencyMs)))
+ .Select(g => (g.Key.Add(timeShift).ToOADate(), (double)g.Sum(x => x.BlockedCount)))
.ToList();
- UpdateLane(FileIoChart, "I/O ms", ioGrouped, "#81C784", baseline: ioBaseline, minAnomalyValue: 2);
+ if (refBlocking.Count > 0)
+ AddGhostLine(BlockingChart, refBlocking, "#E57373");
}
- else
- ShowEmpty(FileIoChart, "I/O ms");
- // Comparison overlay — fetch reference period data and render as ghost lines
- if (comparisonRange.HasValue)
+ if (refMemoryTask.IsCompletedSuccessfully)
+ AddGhostLine(MemoryChart, refMemoryTask.Result
+ .Select(d => (d.CollectionTime.Add(timeShift).ToOADate(), (double)d.TotalMemoryMb)).ToList(), "#CE93D8");
+
+ if (refIoTask.IsCompletedSuccessfully)
{
- var refFrom = comparisonRange.Value.From;
- var refTo = comparisonRange.Value.To;
- var timeShift = (fromDate ?? DateTime.UtcNow.AddHours(-hoursBack)) - refFrom;
-
- var refCpuTask = _dataService.GetCpuUtilizationAsync(0, refFrom, refTo);
- var refWaitTask = _dataService.GetTotalWaitStatsTrendAsync(0, refFrom, refTo);
- var refBlockingTask = _dataService.GetBlockedSessionTrendAsync(0, refFrom, refTo);
- var refMemoryTask = _dataService.GetMemoryStatsAsync(0, refFrom, refTo);
- var refIoTask = _dataService.GetFileIoLatencyTimeSeriesAsync(false, 0, refFrom, refTo);
-
- try { await Task.WhenAll(refCpuTask, refWaitTask, refBlockingTask, refMemoryTask, refIoTask); }
- catch (Exception ex) { Debug.WriteLine($"CorrelatedLanes: Comparison fetch failed: {ex.Message}"); }
-
- if (refCpuTask.IsCompletedSuccessfully)
- AddGhostLine(CpuChart, refCpuTask.Result
- .Select(d => (d.SampleTime.Add(timeShift).ToOADate(), (double)d.SqlServerCpuUtilization)).ToList(), "#4FC3F7");
-
- if (refWaitTask.IsCompletedSuccessfully)
- AddGhostLine(WaitStatsChart, refWaitTask.Result
- .Select(d => (d.CollectionTime.Add(timeShift).ToOADate(), (double)d.WaitTimeMsPerSecond)).ToList(), "#FFB74D");
-
- if (refBlockingTask.IsCompletedSuccessfully)
- {
- var refBlocking = refBlockingTask.Result
- .GroupBy(d => d.CollectionTime)
- .OrderBy(g => g.Key)
- .Select(g => (g.Key.Add(timeShift).ToOADate(), (double)g.Sum(x => x.BlockedCount)))
- .ToList();
- if (refBlocking.Count > 0)
- AddGhostLine(BlockingChart, refBlocking, "#E57373");
- }
-
- if (refMemoryTask.IsCompletedSuccessfully)
- AddGhostLine(MemoryChart, refMemoryTask.Result
- .Select(d => (d.CollectionTime.Add(timeShift).ToOADate(), (double)d.TotalMemoryMb)).ToList(), "#CE93D8");
-
- if (refIoTask.IsCompletedSuccessfully)
- {
- var refIo = refIoTask.Result
- .GroupBy(d => d.CollectionTime)
- .OrderBy(g => g.Key)
- .Select(g => (g.Key.Add(timeShift).ToOADate(), (double)g.Average(x => x.ReadLatencyMs)))
- .ToList();
- AddGhostLine(FileIoChart, refIo, "#81C784");
- }
-
- _crosshairManager?.SetComparisonLabel(ComparisonLabel(comparisonRange.Value, fromDate, hoursBack));
+ var refIo = refIoTask.Result
+ .GroupBy(d => d.CollectionTime)
+ .OrderBy(g => g.Key)
+ .Select(g => (g.Key.Add(timeShift).ToOADate(), (double)g.Average(x => x.ReadLatencyMs)))
+ .ToList();
+ AddGhostLine(FileIoChart, refIo, "#81C784");
}
- _crosshairManager?.ReattachVLines();
- SyncXAxes(hoursBack, fromDate, toDate);
- }
- finally
- {
- _isRefreshing = false;
+ _crosshairManager?.SetComparisonLabel(ComparisonLabel(comparisonRange.Value, fromDate, hoursBack));
}
+
+ _crosshairManager?.ReattachVLines();
+ SyncXAxes(hoursBack, fromDate, toDate);
}
///
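The refactor above keeps a single Task.WhenAll inside try/catch and then gates each lane on IsCompletedSuccessfully, so one failed query empties its own lane instead of aborting the whole refresh. A minimal sketch of that pattern, with hypothetical fetch and render delegates standing in for DatabaseService and the ScottPlot lanes:

```csharp
// Sketch only: the fetch/render names here are stand-ins, not real project members.
using System;
using System.Diagnostics;
using System.Threading.Tasks;

internal static class LaneRefreshSketch
{
    public static async Task RefreshAsync(Func<Task<double[]>> fetchCpu, Func<Task<double[]>> fetchWaits)
    {
        // Kick off both queries before awaiting anything.
        var cpuTask = fetchCpu();
        var waitTask = fetchWaits();

        try
        {
            // A faulted task surfaces here, but the other tasks still run to completion.
            await Task.WhenAll(cpuTask, waitTask);
        }
        catch (Exception ex)
        {
            Debug.WriteLine($"Data fetch failed: {ex.Message}");
        }

        // Render per task: only lanes whose query succeeded get data.
        if (cpuTask.IsCompletedSuccessfully) Render("CPU %", cpuTask.Result);
        else RenderEmpty("CPU %");

        if (waitTask.IsCompletedSuccessfully) Render("Wait ms/sec", waitTask.Result);
        else RenderEmpty("Wait ms/sec");
    }

    private static void Render(string lane, double[] points) => Debug.WriteLine($"{lane}: {points.Length} points");
    private static void RenderEmpty(string lane) => Debug.WriteLine($"{lane}: no data");
}
```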
diff --git a/Dashboard/Controls/FinOpsContent.xaml b/Dashboard/Controls/FinOpsContent.xaml
index c18bc1b5..bfea5d36 100644
--- a/Dashboard/Controls/FinOpsContent.xaml
+++ b/Dashboard/Controls/FinOpsContent.xaml
@@ -2447,11 +2447,19 @@
+
+
+
+
+
+
+
+
-
+
diff --git a/Dashboard/Controls/MemoryContent.xaml.cs b/Dashboard/Controls/MemoryContent.xaml.cs
index 678ff5a3..522a6005 100644
--- a/Dashboard/Controls/MemoryContent.xaml.cs
+++ b/Dashboard/Controls/MemoryContent.xaml.cs
@@ -106,7 +106,11 @@ public MemoryContent()
SetupChartContextMenus();
Loaded += OnLoaded;
Helpers.ThemeManager.ThemeChanged += OnThemeChanged;
- Unloaded += (_, _) => Helpers.ThemeManager.ThemeChanged -= OnThemeChanged;
+ Unloaded += (_, _) =>
+ {
+ Helpers.ThemeManager.ThemeChanged -= OnThemeChanged;
+ DisposeChartHelpers();
+ };
// Apply dark theme immediately so charts don't flash white before data loads
TabHelpers.ApplyThemeToChart(MemoryStatsOverviewChart);
@@ -124,6 +128,16 @@ public MemoryContent()
_memoryPressureEventsHover = new Helpers.ChartHoverHelper(MemoryPressureEventsChart, "events");
}
+ public void DisposeChartHelpers()
+ {
+ _memoryStatsOverviewHover?.Dispose();
+ _memoryGrantSizingHover?.Dispose();
+ _memoryGrantActivityHover?.Dispose();
+ _memoryClerksHover?.Dispose();
+ _planCacheHover?.Dispose();
+ _memoryPressureEventsHover?.Dispose();
+ }
+
private void OnLoaded(object sender, RoutedEventArgs e)
{
// No grids to configure - all tabs are chart-only now
diff --git a/Dashboard/Controls/QueryPerformanceContent.xaml.cs b/Dashboard/Controls/QueryPerformanceContent.xaml.cs
index f2147f83..8417afa8 100644
--- a/Dashboard/Controls/QueryPerformanceContent.xaml.cs
+++ b/Dashboard/Controls/QueryPerformanceContent.xaml.cs
@@ -276,9 +276,18 @@ private void OnUnloaded(object sender, RoutedEventArgs e)
_qsRegressionsUnfilteredData = null;
_lrqPatternsUnfilteredData = null;
+ DisposeChartHelpers();
Helpers.ThemeManager.ThemeChanged -= OnThemeChanged;
}
+ public void DisposeChartHelpers()
+ {
+ _queryDurationHover?.Dispose();
+ _procDurationHover?.Dispose();
+ _qsDurationHover?.Dispose();
+ _execTrendsHover?.Dispose();
+ }
+
private void OnThemeChanged(string _)
{
foreach (var field in GetType().GetFields(
diff --git a/Dashboard/Controls/ResourceMetricsContent.xaml.cs b/Dashboard/Controls/ResourceMetricsContent.xaml.cs
index e66c6e92..74329671 100644
--- a/Dashboard/Controls/ResourceMetricsContent.xaml.cs
+++ b/Dashboard/Controls/ResourceMetricsContent.xaml.cs
@@ -130,7 +130,11 @@ public ResourceMetricsContent()
SetupChartContextMenus();
Loaded += OnLoaded;
Helpers.ThemeManager.ThemeChanged += OnThemeChanged;
- Unloaded += (_, _) => Helpers.ThemeManager.ThemeChanged -= OnThemeChanged;
+ Unloaded += (_, _) =>
+ {
+ Helpers.ThemeManager.ThemeChanged -= OnThemeChanged;
+ DisposeChartHelpers();
+ };
// Apply dark theme immediately so charts don't flash white before data loads
TabHelpers.ApplyThemeToChart(LatchStatsChart);
@@ -158,11 +162,23 @@ public ResourceMetricsContent()
_tempDbLatencyHover = new Helpers.ChartHoverHelper(TempDbLatencyChart, "ms");
}
- private void OnLoaded(object sender, RoutedEventArgs e)
+ public void DisposeChartHelpers()
{
- // Apply minimum column widths based on header text
+ _sessionStatsHover?.Dispose();
+ _latchStatsHover?.Dispose();
+ _spinlockStatsHover?.Dispose();
+ _fileIoReadHover?.Dispose();
+ _fileIoWriteHover?.Dispose();
+ _fileIoReadThroughputHover?.Dispose();
+ _fileIoWriteThroughputHover?.Dispose();
+ _perfmonHover?.Dispose();
+ _waitStatsHover?.Dispose();
+ _tempdbStatsHover?.Dispose();
+ _tempDbLatencyHover?.Dispose();
+ }
- // Freeze identifier columns
+ private void OnLoaded(object sender, RoutedEventArgs e)
+ {
}
private void OnThemeChanged(string _)
diff --git a/Dashboard/Dashboard.csproj b/Dashboard/Dashboard.csproj
index 8fc70396..b7a0103f 100644
--- a/Dashboard/Dashboard.csproj
+++ b/Dashboard/Dashboard.csproj
@@ -7,10 +7,10 @@
PerformanceMonitorDashboard.Program
PerformanceMonitorDashboard
SQL Server Performance Monitor Dashboard
- 2.6.0
- 2.6.0.0
- 2.6.0.0
- 2.6.0
+ 2.7.0
+ 2.7.0.0
+ 2.7.0.0
+ 2.7.0
Darling Data, LLC
Copyright © 2026 Darling Data, LLC
EDD.ico
diff --git a/Dashboard/Helpers/ChartHoverHelper.cs b/Dashboard/Helpers/ChartHoverHelper.cs
index 1fb73cc2..b1ec6f11 100644
--- a/Dashboard/Helpers/ChartHoverHelper.cs
+++ b/Dashboard/Helpers/ChartHoverHelper.cs
@@ -56,6 +56,14 @@ public ChartHoverHelper(ScottPlot.WPF.WpfPlot chart, string unit)
public string Unit { get => _unit; set => _unit = value; }
+ public void Dispose()
+ {
+ _chart.MouseMove -= OnMouseMove;
+ _chart.MouseLeave -= OnMouseLeave;
+ _popup.IsOpen = false;
+ _scatters.Clear();
+ }
+
public void Clear() => _scatters.Clear();
public void Add(ScottPlot.Plottables.Scatter scatter, string label) =>
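The new Dispose mirrors the event subscriptions made when the helper is constructed; without the matching -= the chart keeps a reference to the helper and neither object can be collected. A minimal sketch of the idea, assuming a helper that hooks MouseMove/MouseLeave in its constructor (names are illustrative, not the real ChartHoverHelper members):

```csharp
using System;
using System.Windows.Controls;
using System.Windows.Input;

public sealed class HoverHelperSketch : IDisposable
{
    private readonly Control _target;

    public HoverHelperSketch(Control target)
    {
        _target = target;
        // Subscriptions made here must be undone in Dispose.
        _target.MouseMove += OnMouseMove;
        _target.MouseLeave += OnMouseLeave;
    }

    private void OnMouseMove(object sender, MouseEventArgs e) { /* position and fill the tooltip */ }
    private void OnMouseLeave(object sender, MouseEventArgs e) { /* hide the tooltip */ }

    public void Dispose()
    {
        // Mirror every += from the constructor with a -=.
        _target.MouseMove -= OnMouseMove;
        _target.MouseLeave -= OnMouseLeave;
    }
}
```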
diff --git a/Dashboard/MainWindow.xaml.cs b/Dashboard/MainWindow.xaml.cs
index 396e8ebb..dfb032d2 100644
--- a/Dashboard/MainWindow.xaml.cs
+++ b/Dashboard/MainWindow.xaml.cs
@@ -56,6 +56,8 @@ public partial class MainWindow : Window
private Controls.FinOpsContent? _finOpsContent;
private AlertsHistoryContent? _alertsHistoryContent;
+ private readonly Dictionary _alertAcknowledgedHandlers = new();
+
private McpHostService? _mcpHostService;
private CancellationTokenSource? _mcpCts;
@@ -571,12 +573,14 @@ private async Task OpenServerTabAsync(ServerConnection server)
System.Windows.MessageBoxImage.Error);
return;
}
- serverTab.AlertAcknowledged += (_, _) =>
+ EventHandler alertHandler = (_, _) =>
{
_emailAlertService.HideAllAlerts(8760, server.DisplayNameWithIntent);
UpdateAlertBadge();
_alertsHistoryContent?.RefreshAlerts();
};
+ serverTab.AlertAcknowledged += alertHandler;
+ _alertAcknowledgedHandlers[server.Id] = alertHandler;
var headerPanel = new StackPanel { Orientation = Orientation.Horizontal };
var headerText = new TextBlock
@@ -875,6 +879,15 @@ private void CloseTab_Click(object sender, RoutedEventArgs e)
}
else if (_openTabs.TryGetValue(tabId, out var tabToClose))
{
+ if (tabToClose.Content is ServerTab serverTab)
+ {
+ if (_alertAcknowledgedHandlers.TryGetValue(tabId, out var handler))
+ {
+ serverTab.AlertAcknowledged -= handler;
+ _alertAcknowledgedHandlers.Remove(tabId);
+ }
+ serverTab.CleanupOnClose();
+ }
_openTabs.Remove(tabId);
_tabBadges.Remove(tabId);
ServerTabControl.Items.Remove(tabToClose);
@@ -1080,6 +1093,7 @@ private async void RemoveServer_Click(object sender, RoutedEventArgs e)
if (_openTabs.TryGetValue(server.Id, out var tabItem))
{
+ if (tabItem.Content is ServerTab st) st.CleanupOnClose();
_openTabs.Remove(server.Id);
ServerTabControl.Items.Remove(tabItem);
}
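Storing the lambda in a dictionary is what makes the later -= possible: an anonymous handler subscribed with += can only be removed if the exact same delegate instance is kept somewhere. A small illustrative sketch, not the real MainWindow members (the Guid key type for tab ids is an assumption):

```csharp
using System;
using System.Collections.Generic;

public class TabHostSketch
{
    public event EventHandler? AlertAcknowledged;

    // Keyed by tab id so the handler can be found again when the tab closes.
    private readonly Dictionary<Guid, EventHandler> _handlers = new();

    public void OpenTab(Guid tabId)
    {
        EventHandler handler = (_, _) => Console.WriteLine($"Alert acknowledged on {tabId}");
        AlertAcknowledged += handler;
        _handlers[tabId] = handler;   // keep the exact instance for later removal
    }

    public void CloseTab(Guid tabId)
    {
        if (_handlers.TryGetValue(tabId, out var handler))
        {
            AlertAcknowledged -= handler;  // works because it is the same delegate instance
            _handlers.Remove(tabId);
        }
    }
}
```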
diff --git a/Dashboard/Mcp/McpQueryTools.cs b/Dashboard/Mcp/McpQueryTools.cs
index 4bfb25b1..8cba303f 100644
--- a/Dashboard/Mcp/McpQueryTools.cs
+++ b/Dashboard/Mcp/McpQueryTools.cs
@@ -38,19 +38,13 @@ public static async Task GetTopQueriesByCpu(
var topError = McpHelpers.ValidateTop(top, "top");
if (topError != null) return topError;
- var rows = await resolved.Value.Service.GetQueryStatsAsync(hours_back);
+ var rows = await resolved.Value.Service.GetQueryStatsForMcpAsync(hours_back, top, database_name, parallel_only, min_dop);
if (rows.Count == 0)
{
return "No query stats available for the specified time range.";
}
- IEnumerable filtered = rows;
- if (!string.IsNullOrEmpty(database_name))
- filtered = filtered.Where(r => string.Equals(r.DatabaseName, database_name, StringComparison.OrdinalIgnoreCase));
- if (parallel_only || min_dop > 1)
- filtered = filtered.Where(r => r.MaxDop > 1 && r.MaxDop >= (min_dop > 1 ? min_dop : 2));
-
- var result = filtered.Take(top).Select(r => new
+ var result = rows.Select(r => new
{
database_name = r.DatabaseName,
query_hash = r.QueryHash,
@@ -116,17 +110,13 @@ public static async Task GetTopProceduresByCpu(
var topError = McpHelpers.ValidateTop(top, "top");
if (topError != null) return topError;
- var rows = await resolved.Value.Service.GetProcedureStatsAsync(hours_back);
+ var rows = await resolved.Value.Service.GetProcedureStatsForMcpAsync(hours_back, top, database_name);
if (rows.Count == 0)
{
return "No procedure stats available for the specified time range.";
}
- IEnumerable filtered = rows;
- if (!string.IsNullOrEmpty(database_name))
- filtered = filtered.Where(r => string.Equals(r.DatabaseName, database_name, StringComparison.OrdinalIgnoreCase));
-
- var result = filtered.Take(top).Select(r => new
+ var result = rows.Select(r => new
{
database_name = r.DatabaseName,
full_name = r.FullObjectName,
@@ -187,19 +177,13 @@ public static async Task GetQueryStoreTop(
var topError = McpHelpers.ValidateTop(top, "top");
if (topError != null) return topError;
- var rows = await resolved.Value.Service.GetQueryStoreDataAsync(hours_back);
+ var rows = await resolved.Value.Service.GetQueryStoreDataForMcpAsync(hours_back, top, database_name, parallel_only, min_dop);
if (rows.Count == 0)
{
return "No Query Store data available. Query Store may not be enabled on target databases.";
}
- IEnumerable filtered = rows;
- if (!string.IsNullOrEmpty(database_name))
- filtered = filtered.Where(r => string.Equals(r.DatabaseName, database_name, StringComparison.OrdinalIgnoreCase));
- if (parallel_only || min_dop > 1)
- filtered = filtered.Where(r => r.MaxDop > 1 && r.MaxDop >= (min_dop > 1 ? min_dop : 2));
-
- var result = filtered.Take(top).Select(r => new
+ var result = rows.Select(r => new
{
database_name = r.DatabaseName,
query_id = r.QueryId,
diff --git a/Dashboard/Models/ServerConnection.cs b/Dashboard/Models/ServerConnection.cs
index 15ca76c0..ba80d9fa 100644
--- a/Dashboard/Models/ServerConnection.cs
+++ b/Dashboard/Models/ServerConnection.cs
@@ -69,6 +69,12 @@ public bool UseWindowsAuth
/// </summary>
public bool ReadOnlyIntent { get; set; } = false;
+ /// <summary>
+ /// When true, sets MultiSubnetFailover=true on the connection string.
+ /// Recommended for AG listeners and FCIs spanning multiple subnets.
+ /// </summary>
+ public bool MultiSubnetFailover { get; set; } = false;
+
/// <summary>
/// Monthly cost of this server in USD, used for FinOps cost attribution.
/// Set to 0 to hide cost columns. All FinOps costs are proportional to this budget.
@@ -120,6 +126,7 @@ public string GetConnectionString(ICredentialService credentialService)
_ => SqlConnectionEncryptOption.Mandatory
},
ApplicationIntent = ReadOnlyIntent ? ApplicationIntent.ReadOnly : ApplicationIntent.ReadWrite,
+ MultiSubnetFailover = MultiSubnetFailover,
Authentication = SqlAuthenticationMethod.ActiveDirectoryInteractive
};
@@ -151,7 +158,8 @@ public string GetConnectionString(ICredentialService credentialService)
password,
EncryptMode,
TrustServerCertificate,
- ReadOnlyIntent
+ ReadOnlyIntent,
+ MultiSubnetFailover
).ConnectionString;
}
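For reference, this is roughly what the new flag contributes to the generated connection string, using Microsoft.Data.SqlClient's SqlConnectionStringBuilder (the same builder the model uses); the server and database names below are placeholders:

```csharp
using System;
using Microsoft.Data.SqlClient;

var builder = new SqlConnectionStringBuilder
{
    DataSource = "ag-listener.contoso.local",     // AG listener or multi-subnet FCI name (placeholder)
    InitialCatalog = "PerformanceMonitor",
    IntegratedSecurity = true,
    ApplicationIntent = ApplicationIntent.ReadWrite,
    MultiSubnetFailover = true                    // attempt all resolved IPs for faster failover
};

// The builder emits the MultiSubnetFailover keyword alongside the other options.
Console.WriteLine(builder.ConnectionString);
```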
diff --git a/Dashboard/ServerTab.xaml.cs b/Dashboard/ServerTab.xaml.cs
index a6eb9cf6..4ba7629e 100644
--- a/Dashboard/ServerTab.xaml.cs
+++ b/Dashboard/ServerTab.xaml.cs
@@ -5,6 +5,7 @@
using System.Windows.Data;
using System.Text;
using System.Collections.Generic;
+using System.Threading;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
@@ -46,7 +47,9 @@ public partial class ServerTab : UserControl
private readonly UserPreferencesService _preferencesService;
private DispatcherTimer? _autoRefreshTimer;
+ private CancellationTokenSource? _autoRefreshCts;
private bool _isRefreshing;
+ private DateTime _refreshStartedUtc;
private bool _suppressPickerUpdates;
// Filter state dictionaries for each DataGrid
@@ -69,6 +72,14 @@ public partial class ServerTab : UserControl
// Legend panel references for edge-based legends (ScottPlot issue #4717 workaround)
private Dictionary _legendPanels = new();
+ // Stored event handler delegates for cleanup
+ private Action? _viewPlanHandler;
+ private Action? _actualPlanStartedHandler;
+ private Action? _actualPlanFinishedHandler;
+ private Action? _drillDownTimeRangeHandler;
+ private Action? _subTabChangedHandler;
+ private Analysis.SqlServerBaselineProvider? _baselineProvider;
+
// Chart hover tooltips
private Helpers.ChartHoverHelper? _resourceOverviewCpuHover;
private Helpers.ChartHoverHelper? _resourceOverviewMemoryHover;
@@ -118,7 +129,6 @@ public ServerTab(ServerConnection serverConnection, int utcOffsetMinutes = 0)
InitializeDefaultTimeRanges();
SetupChartContextMenus();
- SetupAutoRefresh();
SetupSubTabContextMenus();
BlockingSlicer.RangeChanged += OnBlockingSlicerChanged;
@@ -140,27 +150,23 @@ public ServerTab(ServerConnection serverConnection, int utcOffsetMinutes = 0)
MemoryTab.Initialize(_databaseService);
MemoryTab.ChartDrillDownRequested += OnChildChartDrillDown;
PerformanceTab.Initialize(_databaseService, s => StatusText.Text = s);
- PerformanceTab.ViewPlanRequested += (planXml, label, queryText) =>
+ _viewPlanHandler = (planXml, label, queryText) =>
{
OpenPlanTab(planXml, label, queryText);
PlanViewerTabItem.IsSelected = true;
};
- PerformanceTab.ActualPlanStarted += (label) =>
- {
- ShowPlanLoading(label);
- };
- PerformanceTab.ActualPlanFinished += () =>
- {
- HidePlanLoading();
- };
- PerformanceTab.DrillDownTimeRangeRequested += (from, to) =>
- {
- SetDrillDownGlobalRange(from, to);
- };
- PerformanceTab.SubTabChanged += () => UpdateCompareDropdownState();
+ _actualPlanStartedHandler = (label) => ShowPlanLoading(label);
+ _actualPlanFinishedHandler = () => HidePlanLoading();
+ _drillDownTimeRangeHandler = (from, to) => SetDrillDownGlobalRange(from, to);
+ _subTabChangedHandler = () => UpdateCompareDropdownState();
+ PerformanceTab.ViewPlanRequested += _viewPlanHandler;
+ PerformanceTab.ActualPlanStarted += _actualPlanStartedHandler;
+ PerformanceTab.ActualPlanFinished += _actualPlanFinishedHandler;
+ PerformanceTab.DrillDownTimeRangeRequested += _drillDownTimeRangeHandler;
+ PerformanceTab.SubTabChanged += _subTabChangedHandler;
SystemEventsContent.Initialize(_databaseService);
- var baselineProvider = new Analysis.SqlServerBaselineProvider(_databaseService.ConnectionString);
- ResourceMetricsContent.Initialize(_databaseService, baselineProvider);
+ _baselineProvider = new Analysis.SqlServerBaselineProvider(_databaseService.ConnectionString);
+ ResourceMetricsContent.Initialize(_databaseService, _baselineProvider);
ResourceMetricsContent.ChartDrillDownRequested += OnChildChartDrillDown;
// Set default time range on UserControls based on user preferences
@@ -338,51 +344,136 @@ private void SetupAutoRefresh()
if (prefs.AutoRefreshEnabled)
{
- _autoRefreshTimer = new DispatcherTimer
- {
- Interval = TimeSpan.FromSeconds(prefs.AutoRefreshIntervalSeconds)
- };
- _autoRefreshTimer.Tick += async (s, e) =>
+ StartAutoRefreshLoop(prefs.AutoRefreshIntervalSeconds);
+ AutoRefreshToggle.IsChecked = true;
+ AutoRefreshToggle.Content = $"Auto-Refresh: {prefs.AutoRefreshIntervalSeconds}s";
+ }
+ else
+ {
+ AutoRefreshToggle.IsChecked = false;
+ AutoRefreshToggle.Content = "Auto-Refresh: Off";
+ }
+ }
+
+ /// <summary>
+ /// Async loop that replaces DispatcherTimer for auto-refresh. Task.Delay is not
+ /// subject to Dispatcher priority starvation under heavy UI load (chart rendering,
+ /// data binding) that can indefinitely defer Background-priority DispatcherTimer ticks.
+ /// </summary>
+ private async void StartAutoRefreshLoop(int intervalSeconds)
+ {
+ if (_autoRefreshCts != null && !_autoRefreshCts.IsCancellationRequested)
+ return;
+
+ _autoRefreshCts?.Cancel();
+ var cts = new CancellationTokenSource();
+ _autoRefreshCts = cts;
+
+ try
+ {
+ while (!cts.Token.IsCancellationRequested)
{
- if (_isRefreshing) return;
- _isRefreshing = true;
+ await Task.Delay(TimeSpan.FromSeconds(intervalSeconds), cts.Token);
+ if (cts.Token.IsCancellationRequested) break;
+ if (_isRefreshing) continue;
+ _isRefreshing = true;
+ _refreshStartedUtc = DateTime.UtcNow;
try
{
- await LoadDataAsync(fullRefresh: false);
+ var sw = System.Diagnostics.Stopwatch.StartNew();
+ await RefreshVisibleTabAsync();
+ StatusText.Text = "Ready";
+ FooterText.Text = $"Last refresh: {DateTime.Now:yyyy-MM-dd HH:mm:ss} | Server: {_serverConnection.DisplayName}";
+ Logger.Info($"Auto-refresh completed in {sw.ElapsedMilliseconds}ms for {_serverConnection.DisplayName}");
}
- catch (Exception ex)
+ catch (OperationCanceledException) when (!cts.Token.IsCancellationRequested)
{
- Logger.Error($"Error in auto-refresh: {ex.Message}", ex);
+ Logger.Error($"Auto-refresh query cancelled for {_serverConnection.DisplayName}, continuing loop");
+ }
+ catch (Exception ex) when (ex is not OperationCanceledException)
+ {
+ Logger.Error($"Auto-refresh error: {ex.Message}", ex);
StatusText.Text = "Auto-refresh error";
}
finally
{
_isRefreshing = false;
}
- };
- _autoRefreshTimer.Start();
- AutoRefreshToggle.IsChecked = true;
- AutoRefreshToggle.Content = $"Auto-Refresh: {prefs.AutoRefreshIntervalSeconds}s";
+ }
}
- else
+ catch (OperationCanceledException)
{
- AutoRefreshToggle.IsChecked = false;
- AutoRefreshToggle.Content = "Auto-Refresh: Off";
+ Logger.Info($"Auto-refresh loop stopped for {_serverConnection.DisplayName}");
}
}
private void ServerTab_Unloaded(object sender, RoutedEventArgs e)
{
- // Stop the timer when the tab is closed
+ // WPF fires Unloaded on tab switch, not just destruction.
+ // Don't tear down state here — the auto-refresh loop and chart
+ // state must survive tab switches. Cleanup happens when the tab
+ // is actually removed from the TabControl (via CleanupOnClose).
+ }
+
+ /// <summary>
+ /// Full cleanup — call when the server tab is permanently removed, not on tab switch.
+ /// </summary>
+ public void CleanupOnClose()
+ {
+ _autoRefreshCts?.Cancel();
_autoRefreshTimer?.Stop();
_autoRefreshTimer = null;
- // Unsubscribe event handlers to prevent memory leaks
Helpers.ThemeManager.ThemeChanged -= OnThemeChanged;
Loaded -= ServerTab_Loaded;
Unloaded -= ServerTab_Unloaded;
KeyDown -= ServerTab_KeyDown;
+
+ BlockingSlicer.RangeChanged -= OnBlockingSlicerChanged;
+ DeadlockSlicer.RangeChanged -= OnDeadlockSlicerChanged;
+
+ CriticalIssuesTab.InvestigateRequested -= OnInvestigateCriticalIssue;
+ MemoryTab.ChartDrillDownRequested -= OnChildChartDrillDown;
+ ResourceMetricsContent.ChartDrillDownRequested -= OnChildChartDrillDown;
+
+ if (_viewPlanHandler != null) PerformanceTab.ViewPlanRequested -= _viewPlanHandler;
+ if (_actualPlanStartedHandler != null) PerformanceTab.ActualPlanStarted -= _actualPlanStartedHandler;
+ if (_actualPlanFinishedHandler != null) PerformanceTab.ActualPlanFinished -= _actualPlanFinishedHandler;
+ if (_drillDownTimeRangeHandler != null) PerformanceTab.DrillDownTimeRangeRequested -= _drillDownTimeRangeHandler;
+ if (_subTabChangedHandler != null) PerformanceTab.SubTabChanged -= _subTabChangedHandler;
+
+ DisposeChartHelpers();
+
+ _collectionHealthUnfilteredData = null;
+ _blockingEventsUnfilteredData = null;
+ _deadlocksUnfilteredData = null;
+ _collectionHealthFilters.Clear();
+ _blockingEventsFilters.Clear();
+ _deadlocksFilters.Clear();
+ _legendPanels.Clear();
+
+ _baselineProvider?.ClearCache();
+ }
+
+ public void DisposeChartHelpers()
+ {
+ _resourceOverviewCpuHover?.Dispose();
+ _resourceOverviewMemoryHover?.Dispose();
+ _resourceOverviewIoHover?.Dispose();
+ _resourceOverviewWaitHover?.Dispose();
+ _lockWaitStatsHover?.Dispose();
+ _blockingEventsHover?.Dispose();
+ _blockingDurationHover?.Dispose();
+ _deadlocksHover?.Dispose();
+ _deadlockWaitTimeHover?.Dispose();
+ _collectorDurationHover?.Dispose();
+ _currentWaitsDurationHover?.Dispose();
+ _currentWaitsBlockedHover?.Dispose();
+
+ MemoryTab.DisposeChartHelpers();
+ ResourceMetricsContent.DisposeChartHelpers();
+ PerformanceTab.DisposeChartHelpers();
}
private void OnThemeChanged(string _)
@@ -400,7 +491,9 @@ private void OnThemeChanged(string _)
public void RefreshAutoRefreshSettings()
{
- // Stop existing timer
+ // Stop existing loop and timer
+ _autoRefreshCts?.Cancel();
+ _autoRefreshCts = null;
_autoRefreshTimer?.Stop();
_autoRefreshTimer = null;
@@ -409,30 +502,7 @@ public void RefreshAutoRefreshSettings()
if (prefs.AutoRefreshEnabled)
{
- _autoRefreshTimer = new DispatcherTimer
- {
- Interval = TimeSpan.FromSeconds(prefs.AutoRefreshIntervalSeconds)
- };
- _autoRefreshTimer.Tick += async (s, e) =>
- {
- if (_isRefreshing) return;
- _isRefreshing = true;
-
- try
- {
- await LoadDataAsync(fullRefresh: false);
- }
- catch (Exception ex)
- {
- Logger.Error($"Error in auto-refresh: {ex.Message}", ex);
- StatusText.Text = "Auto-refresh error";
- }
- finally
- {
- _isRefreshing = false;
- }
- };
- _autoRefreshTimer.Start();
+ StartAutoRefreshLoop(prefs.AutoRefreshIntervalSeconds);
AutoRefreshToggle.IsChecked = true;
AutoRefreshToggle.Content = $"Auto-Refresh: {prefs.AutoRefreshIntervalSeconds}s";
}
@@ -458,30 +528,7 @@ private void AutoRefreshToggle_Click(object sender, RoutedEventArgs e)
prefs.AutoRefreshEnabled = true;
_preferencesService.SavePreferences(prefs);
- _autoRefreshTimer = new DispatcherTimer
- {
- Interval = TimeSpan.FromSeconds(prefs.AutoRefreshIntervalSeconds)
- };
- _autoRefreshTimer.Tick += async (s, args) =>
- {
- if (_isRefreshing) return;
- _isRefreshing = true;
-
- try
- {
- await LoadDataAsync(fullRefresh: false);
- }
- catch (Exception ex)
- {
- Logger.Error($"Error in auto-refresh: {ex.Message}", ex);
- StatusText.Text = "Auto-refresh error";
- }
- finally
- {
- _isRefreshing = false;
- }
- };
- _autoRefreshTimer.Start();
+ StartAutoRefreshLoop(prefs.AutoRefreshIntervalSeconds);
AutoRefreshToggle.Content = $"Auto-Refresh: {prefs.AutoRefreshIntervalSeconds}s";
}
else
@@ -490,8 +537,7 @@ private void AutoRefreshToggle_Click(object sender, RoutedEventArgs e)
prefs.AutoRefreshEnabled = false;
_preferencesService.SavePreferences(prefs);
- _autoRefreshTimer?.Stop();
- _autoRefreshTimer = null;
+ _autoRefreshCts?.Cancel();
AutoRefreshToggle.Content = "Auto-Refresh: Off";
}
}
@@ -595,6 +641,7 @@ private async void ServerTab_Loaded(object sender, RoutedEventArgs e)
DefaultTraceTab.SetTimeRange(_globalHoursBack, _globalFromDate, _globalToDate);
await LoadDataAsync();
+ SetupAutoRefresh();
}
catch (Exception ex)
{
@@ -1129,6 +1176,15 @@ private async Task ApplyAndRefreshCurrentTabAsync()
/// </summary>
private async Task LoadDataAsync(bool fullRefresh = true)
{
+ if (_isRefreshing)
+ {
+ // If a previous refresh has been running for over 2 minutes, it's stuck — allow a new one
+ if ((DateTime.UtcNow - _refreshStartedUtc).TotalMinutes < 2) return;
+ Logger.Error($"Previous refresh appears stuck (started {_refreshStartedUtc:HH:mm:ss}), allowing new refresh");
+ }
+ _isRefreshing = true;
+ _refreshStartedUtc = DateTime.UtcNow;
+
using var _ = Helpers.MethodProfiler.StartTiming("ServerTab");
try
{
@@ -1139,12 +1195,19 @@ private async Task LoadDataAsync(bool fullRefresh = true)
if (!connected)
{
StatusText.Text = $"Failed to connect to {_serverConnection.DisplayName}";
- MessageBox.Show(
- $"Could not connect to SQL Server: {_serverConnection.ServerName}\n\nCheck connection settings",
- "Connection Error",
- MessageBoxButton.OK,
- MessageBoxImage.Error
- );
+ if (fullRefresh)
+ {
+ MessageBox.Show(
+ $"Could not connect to SQL Server: {_serverConnection.ServerName}\n\nCheck connection settings",
+ "Connection Error",
+ MessageBoxButton.OK,
+ MessageBoxImage.Error
+ );
+ }
+ else
+ {
+ Logger.Error($"Auto-refresh connection failed for {_serverConnection.DisplayName}");
+ }
return;
}
@@ -1167,16 +1230,24 @@ private async Task LoadDataAsync(bool fullRefresh = true)
catch (Exception ex)
{
StatusText.Text = "Error loading data";
- MessageBox.Show(
- $"Error loading data:\n\n{ex.Message}",
- "Error",
- MessageBoxButton.OK,
- MessageBoxImage.Error
- );
+ if (fullRefresh)
+ {
+ MessageBox.Show(
+ $"Error loading data:\n\n{ex.Message}",
+ "Error",
+ MessageBoxButton.OK,
+ MessageBoxImage.Error
+ );
+ }
+ else
+ {
+ Logger.Error($"Auto-refresh error for {_serverConnection.DisplayName}: {ex.Message}", ex);
+ }
}
finally
{
RefreshButton.IsEnabled = true;
+ _isRefreshing = false;
}
}
@@ -1643,9 +1714,11 @@ private async void DataTabControl_SelectionChanged(object sender, SelectionChang
UpdateCompareDropdownState();
// Don't refresh during initial load or if already refreshing
- if (_isRefreshing || !IsLoaded) return;
+ if (!IsLoaded) return;
+ if (_isRefreshing && (DateTime.UtcNow - _refreshStartedUtc).TotalMinutes < 2) return;
_isRefreshing = true;
+ _refreshStartedUtc = DateTime.UtcNow;
try
{
await RefreshVisibleTabAsync();
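Condensed to its core, the replacement loop looks like the sketch below (member names are hypothetical). An awaited Task.Delay continuation is posted back through the captured SynchronizationContext like any other awaited work, so it keeps firing under sustained rendering and data-binding load that can defer a Background-priority DispatcherTimer tick, which is the rationale stated in the code above:

```csharp
using System;
using System.Threading;
using System.Threading.Tasks;

public class RefreshLoopSketch
{
    private CancellationTokenSource? _cts;
    private bool _isRefreshing;

    // async void is deliberate: this mirrors a fire-and-forget UI refresh loop.
    public async void Start(int intervalSeconds, Func<Task> refreshAsync)
    {
        _cts?.Cancel();
        var cts = new CancellationTokenSource();
        _cts = cts;

        try
        {
            while (!cts.Token.IsCancellationRequested)
            {
                await Task.Delay(TimeSpan.FromSeconds(intervalSeconds), cts.Token);
                if (_isRefreshing) continue;        // skip a tick instead of overlapping refreshes

                _isRefreshing = true;
                try { await refreshAsync(); }
                catch (Exception ex) { Console.WriteLine($"refresh failed: {ex.Message}"); }
                finally { _isRefreshing = false; }
            }
        }
        catch (OperationCanceledException)
        {
            // Stop() cancelled the delay; let the loop end quietly.
        }
    }

    public void Stop() => _cts?.Cancel();
}
```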
diff --git a/Dashboard/Services/DatabaseService.FinOps.cs b/Dashboard/Services/DatabaseService.FinOps.cs
index 2df3b33e..752e02e0 100644
--- a/Dashboard/Services/DatabaseService.FinOps.cs
+++ b/Dashboard/Services/DatabaseService.FinOps.cs
@@ -391,6 +391,20 @@ public static async Task GetServerPropertiesLiveAsync(str
await connection.OpenAsync();
const string query = @"
+DECLARE @host_os nvarchar(256);
+IF OBJECT_ID(N'sys.dm_os_host_info', N'V') IS NOT NULL
+ EXEC sys.sp_executesql N'SELECT @os = host_distribution FROM sys.dm_os_host_info',
+ N'@os nvarchar(256) OUTPUT', @os = @host_os OUTPUT;
+
+IF @host_os IS NULL
+BEGIN
+ /* SQL 2016 or Azure SQL DB: parse OS from @@VERSION */
+ DECLARE @ver nvarchar(4000) = @@VERSION;
+ DECLARE @on_pos int = CHARINDEX(N' on ', @ver);
+ IF @on_pos > 0
+ SET @host_os = LTRIM(SUBSTRING(@ver, @on_pos + 4, LEN(@ver)));
+END;
+
SELECT
edition =
CONVERT(nvarchar(256), SERVERPROPERTY('Edition')),
@@ -417,7 +431,9 @@ public static async Task GetServerPropertiesLiveAsync(str
is_hadr_enabled =
CONVERT(int, SERVERPROPERTY('IsHadrEnabled')),
is_clustered =
- CONVERT(int, SERVERPROPERTY('IsClustered'))
+ CONVERT(int, SERVERPROPERTY('IsClustered')),
+ host_os =
+ @host_os
FROM sys.dm_os_sys_info AS si;";
using var command = new SqlCommand(query, connection);
@@ -446,6 +462,7 @@ public static async Task GetServerPropertiesLiveAsync(str
EngineEdition = reader.IsDBNull(10) ? null : Convert.ToInt32(reader.GetValue(10)),
IsHadrEnabled = reader.IsDBNull(11) ? null : Convert.ToInt32(reader.GetValue(11)) == 1,
IsClustered = reader.IsDBNull(12) ? null : Convert.ToInt32(reader.GetValue(12)) == 1,
+ HostOsVersion = reader.IsDBNull(13) ? "" : reader.GetString(13),
LastUpdated = DateTime.Now
};
}
@@ -2496,6 +2513,7 @@ public class FinOpsServerInventory
public string ServerName { get; set; } = "";
public string Edition { get; set; } = "";
public string SqlVersion { get; set; } = "";
+ public string HostOsVersion { get; set; } = "";
public int CpuCount { get; set; }
public long PhysicalMemoryMb { get; set; }
public int? SocketCount { get; set; }
diff --git a/Dashboard/Services/DatabaseService.QueryPerformance.cs b/Dashboard/Services/DatabaseService.QueryPerformance.cs
index 9e59094c..bd0e1825 100644
--- a/Dashboard/Services/DatabaseService.QueryPerformance.cs
+++ b/Dashboard/Services/DatabaseService.QueryPerformance.cs
@@ -4145,5 +4145,638 @@ ON ISNULL(c.database_name, N'') = ISNULL(b.database_name, N'')
return items;
}
+
+ // ============================================
+ // MCP-Optimized Query Methods
+ // ============================================
+ // These methods use multi-phase temp tables to avoid
+ // decompressing text on every row. Only the TOP N winners
+ // get hydrated with query text. The shared UI methods above
+ // are intentionally untouched — the UI needs all rows.
+
+ /// <summary>
+ /// MCP-optimized query stats: aggregate numerics first, rank TOP N, then hydrate text.
+ /// </summary>
+ public async Task<List<QueryStatsItem>> GetQueryStatsForMcpAsync(
+ int hoursBack, int top, string? databaseName = null,
+ bool parallelOnly = false, int minDop = 0)
+ {
+ var items = new List<QueryStatsItem>();
+
+ await using var tc = await OpenThrottledConnectionAsync();
+ var connection = tc.Connection;
+
+ string query = @"
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+/*Phase 1: aggregate per-lifetime — numeric only, no DECOMPRESS*/
+DROP TABLE IF EXISTS #per_lifetime;
+
+SELECT
+ qs.database_name,
+ qs.query_hash,
+ qs.creation_time,
+ object_type = MAX(qs.object_type),
+ schema_name = MAX(qs.schema_name),
+ object_name = MAX(qs.object_name),
+ last_execution_time = MAX(qs.last_execution_time),
+ execution_count = MAX(qs.execution_count),
+ total_worker_time = MAX(qs.total_worker_time),
+ min_worker_time = MIN(qs.min_worker_time),
+ max_worker_time = MAX(qs.max_worker_time),
+ total_elapsed_time = MAX(qs.total_elapsed_time),
+ min_elapsed_time = MIN(qs.min_elapsed_time),
+ max_elapsed_time = MAX(qs.max_elapsed_time),
+ total_logical_reads = MAX(qs.total_logical_reads),
+ total_logical_writes = MAX(qs.total_logical_writes),
+ total_physical_reads = MAX(qs.total_physical_reads),
+ min_physical_reads = MIN(qs.min_physical_reads),
+ max_physical_reads = MAX(qs.max_physical_reads),
+ total_rows = MAX(qs.total_rows),
+ min_rows = MIN(qs.min_rows),
+ max_rows = MAX(qs.max_rows),
+ min_dop = MIN(qs.min_dop),
+ max_dop = MAX(qs.max_dop),
+ min_grant_kb = MIN(qs.min_grant_kb),
+ max_grant_kb = MAX(qs.max_grant_kb),
+ total_spills = MAX(qs.total_spills),
+ min_spills = MIN(qs.min_spills),
+ max_spills = MAX(qs.max_spills),
+ query_plan_hash = MAX(qs.query_plan_hash),
+ sql_handle = MAX(qs.sql_handle),
+ plan_handle = MAX(qs.plan_handle)
+INTO #per_lifetime
+FROM collect.query_stats AS qs
+WHERE qs.last_execution_time >= DATEADD(HOUR, -@hoursBack, SYSDATETIME())
+AND (@databaseName IS NULL OR qs.database_name = @databaseName)
+GROUP BY
+ qs.database_name,
+ qs.query_hash,
+ qs.creation_time
+OPTION
+(
+ HASH GROUP,
+ USE HINT('ENABLE_PARALLEL_PLAN_PREFERENCE')
+);
+
+/*Phase 2: sum across lifetimes, rank, take TOP N+5*/
+DROP TABLE IF EXISTS #top_ranked;
+
+SELECT TOP (@top + 5)
+ database_name = pl.database_name,
+ query_hash = CONVERT(nvarchar(20), pl.query_hash, 1),
+ object_type = MAX(pl.object_type),
+ object_name =
+ CASE MAX(pl.object_type)
+ WHEN 'STATEMENT'
+ THEN N'Adhoc'
+ ELSE QUOTENAME(MAX(pl.schema_name)) + N'.' + QUOTENAME(MAX(pl.object_name))
+ END,
+ first_execution_time = MIN(pl.creation_time),
+ last_execution_time = MAX(pl.last_execution_time),
+ execution_count = SUM(pl.execution_count),
+ total_worker_time = SUM(pl.total_worker_time),
+ avg_worker_time_ms = SUM(pl.total_worker_time) / 1000.0 / NULLIF(SUM(pl.execution_count), 0),
+ min_worker_time_ms = MIN(pl.min_worker_time) / 1000.0,
+ max_worker_time_ms = MAX(pl.max_worker_time) / 1000.0,
+ total_elapsed_time = SUM(pl.total_elapsed_time),
+ avg_elapsed_time_ms = SUM(pl.total_elapsed_time) / 1000.0 / NULLIF(SUM(pl.execution_count), 0),
+ min_elapsed_time_ms = MIN(pl.min_elapsed_time) / 1000.0,
+ max_elapsed_time_ms = MAX(pl.max_elapsed_time) / 1000.0,
+ total_logical_reads = SUM(pl.total_logical_reads),
+ avg_logical_reads = SUM(pl.total_logical_reads) / NULLIF(SUM(pl.execution_count), 0),
+ total_logical_writes = SUM(pl.total_logical_writes),
+ avg_logical_writes = SUM(pl.total_logical_writes) / NULLIF(SUM(pl.execution_count), 0),
+ total_physical_reads = SUM(pl.total_physical_reads),
+ avg_physical_reads = SUM(pl.total_physical_reads) / NULLIF(SUM(pl.execution_count), 0),
+ min_physical_reads = MIN(pl.min_physical_reads),
+ max_physical_reads = MAX(pl.max_physical_reads),
+ total_rows = SUM(pl.total_rows),
+ avg_rows = SUM(pl.total_rows) / NULLIF(SUM(pl.execution_count), 0),
+ min_rows = MIN(pl.min_rows),
+ max_rows = MAX(pl.max_rows),
+ min_dop = MIN(pl.min_dop),
+ max_dop = MAX(pl.max_dop),
+ min_grant_kb = MIN(pl.min_grant_kb),
+ max_grant_kb = MAX(pl.max_grant_kb),
+ total_spills = SUM(pl.total_spills),
+ min_spills = MIN(pl.min_spills),
+ max_spills = MAX(pl.max_spills),
+ query_plan_hash = CONVERT(nvarchar(20), MAX(pl.query_plan_hash), 1),
+ sql_handle = CONVERT(nvarchar(130), MAX(pl.sql_handle), 1),
+ plan_handle = CONVERT(nvarchar(130), MAX(pl.plan_handle), 1)
+INTO #top_ranked
+FROM #per_lifetime AS pl
+WHERE (@parallelOnly = 0 OR pl.max_dop > 1)
+AND (@minDop = 0 OR pl.max_dop >= @minDop)
+GROUP BY
+ pl.database_name,
+ pl.query_hash
+ORDER BY
+ avg_worker_time_ms DESC
+OPTION
+(
+ HASH GROUP
+);
+
+/*Phase 3: hydrate text for winners only, apply WAITFOR filter*/
+SELECT TOP (@top)
+ tr.database_name,
+ tr.query_hash,
+ tr.object_type,
+ tr.object_name,
+ tr.first_execution_time,
+ tr.last_execution_time,
+ tr.execution_count,
+ tr.total_worker_time,
+ tr.avg_worker_time_ms,
+ tr.min_worker_time_ms,
+ tr.max_worker_time_ms,
+ tr.total_elapsed_time,
+ tr.avg_elapsed_time_ms,
+ tr.min_elapsed_time_ms,
+ tr.max_elapsed_time_ms,
+ tr.total_logical_reads,
+ tr.avg_logical_reads,
+ tr.total_logical_writes,
+ tr.avg_logical_writes,
+ tr.total_physical_reads,
+ tr.avg_physical_reads,
+ tr.min_physical_reads,
+ tr.max_physical_reads,
+ tr.total_rows,
+ tr.avg_rows,
+ tr.min_rows,
+ tr.max_rows,
+ tr.min_dop,
+ tr.max_dop,
+ tr.min_grant_kb,
+ tr.max_grant_kb,
+ tr.total_spills,
+ tr.min_spills,
+ tr.max_spills,
+ tr.query_plan_hash,
+ tr.sql_handle,
+ tr.plan_handle,
+ qt.query_text
+FROM #top_ranked AS tr
+OUTER APPLY
+(
+ SELECT TOP (1)
+ query_text = CAST(DECOMPRESS(qs2.query_text) AS nvarchar(max))
+ FROM collect.query_stats AS qs2
+ WHERE qs2.query_hash = CONVERT(binary(8), tr.query_hash, 1)
+ AND qs2.database_name = tr.database_name
+ ORDER BY qs2.collection_time DESC
+) AS qt
+WHERE qt.query_text IS NULL
+OR qt.query_text NOT LIKE N'WAITFOR%'
+ORDER BY
+ tr.avg_worker_time_ms DESC;";
+
+ using var command = new SqlCommand(query, connection);
+ command.CommandTimeout = 120;
+
+ command.Parameters.Add(new SqlParameter("@hoursBack", SqlDbType.Int) { Value = hoursBack });
+ command.Parameters.Add(new SqlParameter("@top", SqlDbType.Int) { Value = top });
+ command.Parameters.Add(new SqlParameter("@databaseName", SqlDbType.NVarChar, 128) { Value = (object?)databaseName ?? DBNull.Value });
+ command.Parameters.Add(new SqlParameter("@parallelOnly", SqlDbType.Bit) { Value = parallelOnly });
+ command.Parameters.Add(new SqlParameter("@minDop", SqlDbType.Int) { Value = minDop });
+
+ using var reader = await command.ExecuteReaderAsync();
+ while (await reader.ReadAsync())
+ {
+ items.Add(new QueryStatsItem
+ {
+ DatabaseName = reader.IsDBNull(0) ? "" : reader.GetString(0),
+ QueryHash = reader.IsDBNull(1) ? null : reader.GetString(1),
+ ObjectType = reader.IsDBNull(2) ? "" : reader.GetString(2),
+ ObjectName = reader.IsDBNull(3) ? null : reader.GetString(3),
+ FirstExecutionTime = reader.IsDBNull(4) ? null : reader.GetDateTime(4),
+ LastExecutionTime = reader.IsDBNull(5) ? null : reader.GetDateTime(5),
+ ExecutionCount = reader.IsDBNull(6) ? 0 : reader.GetInt64(6),
+ TotalWorkerTime = reader.IsDBNull(7) ? 0 : reader.GetInt64(7),
+ AvgWorkerTimeMs = reader.IsDBNull(8) ? null : Convert.ToDouble(reader.GetValue(8), CultureInfo.InvariantCulture),
+ MinWorkerTimeMs = reader.IsDBNull(9) ? null : Convert.ToDouble(reader.GetValue(9), CultureInfo.InvariantCulture),
+ MaxWorkerTimeMs = reader.IsDBNull(10) ? null : Convert.ToDouble(reader.GetValue(10), CultureInfo.InvariantCulture),
+ TotalElapsedTime = reader.IsDBNull(11) ? 0 : reader.GetInt64(11),
+ AvgElapsedTimeMs = reader.IsDBNull(12) ? null : Convert.ToDouble(reader.GetValue(12), CultureInfo.InvariantCulture),
+ MinElapsedTimeMs = reader.IsDBNull(13) ? null : Convert.ToDouble(reader.GetValue(13), CultureInfo.InvariantCulture),
+ MaxElapsedTimeMs = reader.IsDBNull(14) ? null : Convert.ToDouble(reader.GetValue(14), CultureInfo.InvariantCulture),
+ TotalLogicalReads = reader.IsDBNull(15) ? 0 : reader.GetInt64(15),
+ AvgLogicalReads = reader.IsDBNull(16) ? null : reader.GetInt64(16),
+ TotalLogicalWrites = reader.IsDBNull(17) ? 0 : reader.GetInt64(17),
+ AvgLogicalWrites = reader.IsDBNull(18) ? null : reader.GetInt64(18),
+ TotalPhysicalReads = reader.IsDBNull(19) ? 0 : reader.GetInt64(19),
+ AvgPhysicalReads = reader.IsDBNull(20) ? null : reader.GetInt64(20),
+ MinPhysicalReads = reader.IsDBNull(21) ? null : reader.GetInt64(21),
+ MaxPhysicalReads = reader.IsDBNull(22) ? null : reader.GetInt64(22),
+ TotalRows = reader.IsDBNull(23) ? 0 : reader.GetInt64(23),
+ AvgRows = reader.IsDBNull(24) ? null : reader.GetInt64(24),
+ MinRows = reader.IsDBNull(25) ? null : reader.GetInt64(25),
+ MaxRows = reader.IsDBNull(26) ? null : reader.GetInt64(26),
+ MinDop = reader.IsDBNull(27) ? null : Convert.ToInt16(reader.GetValue(27)),
+ MaxDop = reader.IsDBNull(28) ? null : Convert.ToInt16(reader.GetValue(28)),
+ MinGrantKb = reader.IsDBNull(29) ? null : reader.GetInt64(29),
+ MaxGrantKb = reader.IsDBNull(30) ? null : reader.GetInt64(30),
+ TotalSpills = reader.IsDBNull(31) ? 0 : reader.GetInt64(31),
+ MinSpills = reader.IsDBNull(32) ? null : reader.GetInt64(32),
+ MaxSpills = reader.IsDBNull(33) ? null : reader.GetInt64(33),
+ QueryPlanHash = reader.IsDBNull(34) ? null : reader.GetString(34),
+ SqlHandle = reader.IsDBNull(35) ? null : reader.GetString(35),
+ PlanHandle = reader.IsDBNull(36) ? null : reader.GetString(36),
+ QueryText = reader.IsDBNull(37) ? null : reader.GetString(37),
+ QueryPlanXml = null
+ });
+ }
+
+ return items;
+ }
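+ // Illustrative call site (variable names here are assumptions, not part of this service):
+ //   List<QueryStatsItem> topCpu = await dataService.GetQueryStatsForMcpAsync(
+ //       hoursBack: 24, top: 10, parallelOnly: true, minDop: 2);
+ //   // Rows arrive ordered by avg_worker_time_ms descending; only these winners carry QueryText.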
+
+ /// <summary>
+ /// MCP-optimized procedure stats: aggregate numerics first, rank TOP N.
+ /// No text hydration needed — procedure names are sysname columns, not compressed.
+ /// </summary>
+ public async Task<List<ProcedureStatsItem>> GetProcedureStatsForMcpAsync(
+ int hoursBack, int top, string? databaseName = null)
+ {
+ var items = new List<ProcedureStatsItem>();
+
+ await using var tc = await OpenThrottledConnectionAsync();
+ var connection = tc.Connection;
+
+ string query = @"
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+/*Phase 1: aggregate per-lifetime — numeric only, no DECOMPRESS*/
+DROP TABLE IF EXISTS #per_lifetime;
+
+SELECT
+ ps.database_name,
+ ps.schema_name,
+ ps.object_name,
+ ps.cached_time,
+ object_id = MAX(ps.object_id),
+ object_type = MAX(ps.object_type),
+ type_desc = MAX(ps.type_desc),
+ last_execution_time = MAX(ps.last_execution_time),
+ execution_count = MAX(ps.execution_count),
+ total_worker_time = MAX(ps.total_worker_time),
+ min_worker_time = MIN(ps.min_worker_time),
+ max_worker_time = MAX(ps.max_worker_time),
+ total_elapsed_time = MAX(ps.total_elapsed_time),
+ min_elapsed_time = MIN(ps.min_elapsed_time),
+ max_elapsed_time = MAX(ps.max_elapsed_time),
+ total_logical_reads = MAX(ps.total_logical_reads),
+ min_logical_reads = MIN(ps.min_logical_reads),
+ max_logical_reads = MAX(ps.max_logical_reads),
+ total_logical_writes = MAX(ps.total_logical_writes),
+ min_logical_writes = MIN(ps.min_logical_writes),
+ max_logical_writes = MAX(ps.max_logical_writes),
+ total_physical_reads = MAX(ps.total_physical_reads),
+ min_physical_reads = MIN(ps.min_physical_reads),
+ max_physical_reads = MAX(ps.max_physical_reads),
+ total_spills = MAX(ps.total_spills),
+ min_spills = MIN(ps.min_spills),
+ max_spills = MAX(ps.max_spills),
+ sql_handle = MAX(ps.sql_handle),
+ plan_handle = MAX(ps.plan_handle)
+INTO #per_lifetime
+FROM collect.procedure_stats AS ps
+WHERE ps.last_execution_time >= DATEADD(HOUR, -@hoursBack, SYSDATETIME())
+AND (@databaseName IS NULL OR ps.database_name = @databaseName)
+GROUP BY
+ ps.database_name,
+ ps.schema_name,
+ ps.object_name,
+ ps.cached_time
+OPTION
+(
+ HASH GROUP,
+ USE HINT('ENABLE_PARALLEL_PLAN_PREFERENCE')
+);
+
+/*Phase 2: sum across lifetimes, rank, return TOP N*/
+SELECT TOP (@top)
+ database_name = pl.database_name,
+ object_id = MAX(pl.object_id),
+ object_name = QUOTENAME(pl.schema_name) + N'.' + QUOTENAME(pl.object_name),
+ schema_name = pl.schema_name,
+ procedure_name = pl.object_name,
+ object_type = MAX(pl.object_type),
+ type_desc = MAX(pl.type_desc),
+ first_cached_time = MIN(pl.cached_time),
+ last_execution_time = MAX(pl.last_execution_time),
+ execution_count = SUM(pl.execution_count),
+ total_worker_time = SUM(pl.total_worker_time),
+ avg_worker_time_ms = SUM(pl.total_worker_time) / 1000.0 / NULLIF(SUM(pl.execution_count), 0),
+ min_worker_time_ms = MIN(pl.min_worker_time) / 1000.0,
+ max_worker_time_ms = MAX(pl.max_worker_time) / 1000.0,
+ total_elapsed_time = SUM(pl.total_elapsed_time),
+ avg_elapsed_time_ms = SUM(pl.total_elapsed_time) / 1000.0 / NULLIF(SUM(pl.execution_count), 0),
+ min_elapsed_time_ms = MIN(pl.min_elapsed_time) / 1000.0,
+ max_elapsed_time_ms = MAX(pl.max_elapsed_time) / 1000.0,
+ total_logical_reads = SUM(pl.total_logical_reads),
+ avg_logical_reads = SUM(pl.total_logical_reads) / NULLIF(SUM(pl.execution_count), 0),
+ min_logical_reads = MIN(pl.min_logical_reads),
+ max_logical_reads = MAX(pl.max_logical_reads),
+ total_logical_writes = SUM(pl.total_logical_writes),
+ avg_logical_writes = SUM(pl.total_logical_writes) / NULLIF(SUM(pl.execution_count), 0),
+ min_logical_writes = MIN(pl.min_logical_writes),
+ max_logical_writes = MAX(pl.max_logical_writes),
+ total_physical_reads = SUM(pl.total_physical_reads),
+ avg_physical_reads = SUM(pl.total_physical_reads) / NULLIF(SUM(pl.execution_count), 0),
+ min_physical_reads = MIN(pl.min_physical_reads),
+ max_physical_reads = MAX(pl.max_physical_reads),
+ total_spills = SUM(pl.total_spills),
+ avg_spills = SUM(pl.total_spills) / NULLIF(SUM(pl.execution_count), 0),
+ min_spills = MIN(pl.min_spills),
+ max_spills = MAX(pl.max_spills),
+ sql_handle = CONVERT(nvarchar(130), MAX(pl.sql_handle), 1),
+ plan_handle = CONVERT(nvarchar(130), MAX(pl.plan_handle), 1)
+FROM #per_lifetime AS pl
+GROUP BY
+ pl.database_name,
+ pl.schema_name,
+ pl.object_name
+ORDER BY
+ avg_worker_time_ms DESC
+OPTION
+(
+ HASH GROUP
+);";
+
+ using var command = new SqlCommand(query, connection);
+ command.CommandTimeout = 120;
+
+ command.Parameters.Add(new SqlParameter("@hoursBack", SqlDbType.Int) { Value = hoursBack });
+ command.Parameters.Add(new SqlParameter("@top", SqlDbType.Int) { Value = top });
+ command.Parameters.Add(new SqlParameter("@databaseName", SqlDbType.NVarChar, 128) { Value = (object?)databaseName ?? DBNull.Value });
+
+ using var reader = await command.ExecuteReaderAsync();
+ while (await reader.ReadAsync())
+ {
+ items.Add(new ProcedureStatsItem
+ {
+ DatabaseName = reader.IsDBNull(0) ? "" : reader.GetString(0),
+ ObjectId = reader.IsDBNull(1) ? 0 : reader.GetInt32(1),
+ ObjectName = reader.IsDBNull(2) ? null : reader.GetString(2),
+ SchemaName = reader.IsDBNull(3) ? null : reader.GetString(3),
+ ProcedureName = reader.IsDBNull(4) ? null : reader.GetString(4),
+ ObjectType = reader.IsDBNull(5) ? "" : reader.GetString(5),
+ TypeDesc = reader.IsDBNull(6) ? null : reader.GetString(6),
+ FirstCachedTime = reader.IsDBNull(7) ? null : reader.GetDateTime(7),
+ LastExecutionTime = reader.IsDBNull(8) ? null : reader.GetDateTime(8),
+ ExecutionCount = reader.IsDBNull(9) ? 0 : reader.GetInt64(9),
+ TotalWorkerTime = reader.IsDBNull(10) ? 0 : reader.GetInt64(10),
+ AvgWorkerTimeMs = reader.IsDBNull(11) ? null : Convert.ToDouble(reader.GetValue(11), CultureInfo.InvariantCulture),
+ MinWorkerTimeMs = reader.IsDBNull(12) ? null : Convert.ToDouble(reader.GetValue(12), CultureInfo.InvariantCulture),
+ MaxWorkerTimeMs = reader.IsDBNull(13) ? null : Convert.ToDouble(reader.GetValue(13), CultureInfo.InvariantCulture),
+ TotalElapsedTime = reader.IsDBNull(14) ? 0 : reader.GetInt64(14),
+ AvgElapsedTimeMs = reader.IsDBNull(15) ? null : Convert.ToDouble(reader.GetValue(15), CultureInfo.InvariantCulture),
+ MinElapsedTimeMs = reader.IsDBNull(16) ? null : Convert.ToDouble(reader.GetValue(16), CultureInfo.InvariantCulture),
+ MaxElapsedTimeMs = reader.IsDBNull(17) ? null : Convert.ToDouble(reader.GetValue(17), CultureInfo.InvariantCulture),
+ TotalLogicalReads = reader.IsDBNull(18) ? 0 : reader.GetInt64(18),
+ AvgLogicalReads = reader.IsDBNull(19) ? null : reader.GetInt64(19),
+ MinLogicalReads = reader.IsDBNull(20) ? null : reader.GetInt64(20),
+ MaxLogicalReads = reader.IsDBNull(21) ? null : reader.GetInt64(21),
+ TotalLogicalWrites = reader.IsDBNull(22) ? 0 : reader.GetInt64(22),
+ AvgLogicalWrites = reader.IsDBNull(23) ? null : reader.GetInt64(23),
+ MinLogicalWrites = reader.IsDBNull(24) ? null : reader.GetInt64(24),
+ MaxLogicalWrites = reader.IsDBNull(25) ? null : reader.GetInt64(25),
+ TotalPhysicalReads = reader.IsDBNull(26) ? 0 : reader.GetInt64(26),
+ AvgPhysicalReads = reader.IsDBNull(27) ? null : reader.GetInt64(27),
+ MinPhysicalReads = reader.IsDBNull(28) ? null : reader.GetInt64(28),
+ MaxPhysicalReads = reader.IsDBNull(29) ? null : reader.GetInt64(29),
+ TotalSpills = reader.IsDBNull(30) ? 0 : reader.GetInt64(30),
+ AvgSpills = reader.IsDBNull(31) ? null : reader.GetInt64(31),
+ MinSpills = reader.IsDBNull(32) ? null : reader.GetInt64(32),
+ MaxSpills = reader.IsDBNull(33) ? null : reader.GetInt64(33),
+ SqlHandle = reader.IsDBNull(34) ? null : reader.GetString(34),
+ PlanHandle = reader.IsDBNull(35) ? null : reader.GetString(35),
+ QueryPlanXml = null
+ });
+ }
+
+ return items;
+ }
+
+ /// <summary>
+ /// MCP-optimized Query Store: aggregate numerics first, rank TOP N, then hydrate text.
+ /// </summary>
+ public async Task<List<QueryStoreItem>> GetQueryStoreDataForMcpAsync(
+ int hoursBack, int top, string? databaseName = null,
+ bool parallelOnly = false, int minDop = 0)
+ {
+ var items = new List<QueryStoreItem>();
+
+ await using var tc = await OpenThrottledConnectionAsync();
+ var connection = tc.Connection;
+
+ string query = @"
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+/*Phase 1: aggregate by (database_name, query_id) — numeric only, no DECOMPRESS*/
+DROP TABLE IF EXISTS #top_qs;
+
+SELECT TOP (@top + 5)
+ database_name = qsd.database_name,
+ query_id = qsd.query_id,
+ execution_type_desc = MAX(qsd.execution_type_desc),
+ module_name = MAX(qsd.module_name),
+ first_execution_time = MIN(qsd.server_first_execution_time),
+ last_execution_time = MAX(qsd.server_last_execution_time),
+ execution_count = SUM(qsd.count_executions),
+ plan_count = COUNT_BIG(DISTINCT qsd.plan_id),
+ avg_duration_ms = SUM(qsd.avg_duration * qsd.count_executions) / 1000.0 / NULLIF(SUM(qsd.count_executions), 0),
+ min_duration_ms = MIN(qsd.min_duration) / 1000.0,
+ max_duration_ms = MAX(qsd.max_duration) / 1000.0,
+ avg_cpu_time_ms = SUM(qsd.avg_cpu_time * qsd.count_executions) / 1000.0 / NULLIF(SUM(qsd.count_executions), 0),
+ min_cpu_time_ms = MIN(qsd.min_cpu_time) / 1000.0,
+ max_cpu_time_ms = MAX(qsd.max_cpu_time) / 1000.0,
+ avg_logical_reads = SUM(qsd.avg_logical_io_reads * qsd.count_executions) / NULLIF(SUM(qsd.count_executions), 0),
+ min_logical_reads = MIN(qsd.min_logical_io_reads),
+ max_logical_reads = MAX(qsd.max_logical_io_reads),
+ avg_logical_writes = SUM(qsd.avg_logical_io_writes * qsd.count_executions) / NULLIF(SUM(qsd.count_executions), 0),
+ min_logical_writes = MIN(qsd.min_logical_io_writes),
+ max_logical_writes = MAX(qsd.max_logical_io_writes),
+ avg_physical_reads = SUM(qsd.avg_physical_io_reads * qsd.count_executions) / NULLIF(SUM(qsd.count_executions), 0),
+ min_physical_reads = MIN(qsd.min_physical_io_reads),
+ max_physical_reads = MAX(qsd.max_physical_io_reads),
+ min_dop = MIN(qsd.min_dop),
+ max_dop = MAX(qsd.max_dop),
+ avg_memory_pages = SUM(qsd.avg_query_max_used_memory * qsd.count_executions) / NULLIF(SUM(qsd.count_executions), 0),
+ min_memory_pages = MIN(qsd.min_query_max_used_memory),
+ max_memory_pages = MAX(qsd.max_query_max_used_memory),
+ avg_rowcount = SUM(qsd.avg_rowcount * qsd.count_executions) / NULLIF(SUM(qsd.count_executions), 0),
+ min_rowcount = MIN(qsd.min_rowcount),
+ max_rowcount = MAX(qsd.max_rowcount),
+ avg_tempdb_pages = SUM(ISNULL(qsd.avg_tempdb_space_used, 0) * qsd.count_executions) / NULLIF(SUM(qsd.count_executions), 0),
+ min_tempdb_pages = MIN(qsd.min_tempdb_space_used),
+ max_tempdb_pages = MAX(qsd.max_tempdb_space_used),
+ plan_type = MAX(qsd.plan_type),
+ is_forced_plan = MAX(CONVERT(tinyint, qsd.is_forced_plan)),
+ compatibility_level = MAX(qsd.compatibility_level),
+ query_plan_hash = CONVERT(nvarchar(20), MAX(qsd.query_plan_hash), 1),
+ force_failure_count = SUM(qsd.force_failure_count),
+ last_force_failure_reason_desc = MAX(qsd.last_force_failure_reason_desc),
+ plan_forcing_type = MAX(qsd.plan_forcing_type),
+ min_clr_time_ms = MIN(qsd.min_clr_time) / 1000.0,
+ max_clr_time_ms = MAX(qsd.max_clr_time) / 1000.0,
+ min_num_physical_io_reads = MIN(qsd.min_num_physical_io_reads),
+ max_num_physical_io_reads = MAX(qsd.max_num_physical_io_reads),
+ min_log_bytes_used = MIN(qsd.min_log_bytes_used),
+ max_log_bytes_used = MAX(qsd.max_log_bytes_used)
+INTO #top_qs
+FROM collect.query_store_data AS qsd
+WHERE qsd.server_last_execution_time >= DATEADD(HOUR, -@hoursBack, SYSDATETIME())
+AND (@databaseName IS NULL OR qsd.database_name = @databaseName)
+AND (@parallelOnly = 0 OR qsd.max_dop > 1)
+AND (@minDop = 0 OR qsd.max_dop >= @minDop)
+GROUP BY
+ qsd.database_name,
+ qsd.query_id
+ORDER BY
+ avg_cpu_time_ms DESC
+OPTION
+(
+ HASH GROUP,
+ HASH JOIN,
+ USE HINT('ENABLE_PARALLEL_PLAN_PREFERENCE')
+);
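+/* The SUM(avg * count) / SUM(count) expressions above re-weight interval averages. Illustrative
+   numbers: intervals of avg 10ms x 100 executions and avg 50ms x 10 executions combine to
+   (10*100 + 50*10) / 110 = ~13.6ms, not the misleading simple mean of 30ms. */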
+
+/*Phase 2: hydrate text for winners only, apply WAITFOR filter*/
+SELECT TOP (@top)
+ tq.database_name,
+ tq.query_id,
+ tq.execution_type_desc,
+ tq.module_name,
+ tq.first_execution_time,
+ tq.last_execution_time,
+ tq.execution_count,
+ tq.plan_count,
+ tq.avg_duration_ms,
+ tq.min_duration_ms,
+ tq.max_duration_ms,
+ tq.avg_cpu_time_ms,
+ tq.min_cpu_time_ms,
+ tq.max_cpu_time_ms,
+ tq.avg_logical_reads,
+ tq.min_logical_reads,
+ tq.max_logical_reads,
+ tq.avg_logical_writes,
+ tq.min_logical_writes,
+ tq.max_logical_writes,
+ tq.avg_physical_reads,
+ tq.min_physical_reads,
+ tq.max_physical_reads,
+ tq.min_dop,
+ tq.max_dop,
+ tq.avg_memory_pages,
+ tq.min_memory_pages,
+ tq.max_memory_pages,
+ tq.avg_rowcount,
+ tq.min_rowcount,
+ tq.max_rowcount,
+ tq.avg_tempdb_pages,
+ tq.min_tempdb_pages,
+ tq.max_tempdb_pages,
+ tq.plan_type,
+ tq.is_forced_plan,
+ tq.compatibility_level,
+ tq.query_plan_hash,
+ tq.force_failure_count,
+ tq.last_force_failure_reason_desc,
+ tq.plan_forcing_type,
+ tq.min_clr_time_ms,
+ tq.max_clr_time_ms,
+ tq.min_num_physical_io_reads,
+ tq.max_num_physical_io_reads,
+ tq.min_log_bytes_used,
+ tq.max_log_bytes_used,
+ qt.query_sql_text
+FROM #top_qs AS tq
+OUTER APPLY
+(
+ SELECT TOP (1)
+ query_sql_text = CAST(DECOMPRESS(qsd2.query_sql_text) AS nvarchar(max))
+ FROM collect.query_store_data AS qsd2
+ WHERE qsd2.database_name = tq.database_name
+ AND qsd2.query_id = tq.query_id
+ ORDER BY qsd2.collection_time DESC
+) AS qt
+WHERE qt.query_sql_text IS NULL
+OR qt.query_sql_text NOT LIKE N'WAITFOR%'
+ORDER BY
+ tq.avg_cpu_time_ms DESC;";
+
+ using var command = new SqlCommand(query, connection);
+ command.CommandTimeout = 120;
+
+ command.Parameters.Add(new SqlParameter("@hoursBack", SqlDbType.Int) { Value = hoursBack });
+ command.Parameters.Add(new SqlParameter("@top", SqlDbType.Int) { Value = top });
+ command.Parameters.Add(new SqlParameter("@databaseName", SqlDbType.NVarChar, 128) { Value = (object?)databaseName ?? DBNull.Value });
+ command.Parameters.Add(new SqlParameter("@parallelOnly", SqlDbType.Bit) { Value = parallelOnly });
+ command.Parameters.Add(new SqlParameter("@minDop", SqlDbType.Int) { Value = minDop });
+
+ using var reader = await command.ExecuteReaderAsync();
+ while (await reader.ReadAsync())
+ {
+ items.Add(new QueryStoreItem
+ {
+ DatabaseName = reader.IsDBNull(0) ? "" : reader.GetString(0),
+ QueryId = reader.IsDBNull(1) ? 0 : reader.GetInt64(1),
+ ExecutionTypeDesc = reader.IsDBNull(2) ? null : reader.GetString(2),
+ ModuleName = reader.IsDBNull(3) ? null : reader.GetString(3),
+ FirstExecutionTime = reader.IsDBNull(4) ? null : reader.GetDateTime(4),
+ LastExecutionTime = reader.IsDBNull(5) ? null : reader.GetDateTime(5),
+ ExecutionCount = reader.IsDBNull(6) ? 0 : reader.GetInt64(6),
+ PlanCount = reader.IsDBNull(7) ? 0 : reader.GetInt64(7),
+ AvgDurationMs = reader.IsDBNull(8) ? null : Convert.ToDouble(reader.GetValue(8), CultureInfo.InvariantCulture),
+ MinDurationMs = reader.IsDBNull(9) ? null : Convert.ToDouble(reader.GetValue(9), CultureInfo.InvariantCulture),
+ MaxDurationMs = reader.IsDBNull(10) ? null : Convert.ToDouble(reader.GetValue(10), CultureInfo.InvariantCulture),
+ AvgCpuTimeMs = reader.IsDBNull(11) ? null : Convert.ToDouble(reader.GetValue(11), CultureInfo.InvariantCulture),
+ MinCpuTimeMs = reader.IsDBNull(12) ? null : Convert.ToDouble(reader.GetValue(12), CultureInfo.InvariantCulture),
+ MaxCpuTimeMs = reader.IsDBNull(13) ? null : Convert.ToDouble(reader.GetValue(13), CultureInfo.InvariantCulture),
+ AvgLogicalReads = reader.IsDBNull(14) ? null : reader.GetInt64(14),
+ MinLogicalReads = reader.IsDBNull(15) ? null : reader.GetInt64(15),
+ MaxLogicalReads = reader.IsDBNull(16) ? null : reader.GetInt64(16),
+ AvgLogicalWrites = reader.IsDBNull(17) ? null : reader.GetInt64(17),
+ MinLogicalWrites = reader.IsDBNull(18) ? null : reader.GetInt64(18),
+ MaxLogicalWrites = reader.IsDBNull(19) ? null : reader.GetInt64(19),
+ AvgPhysicalReads = reader.IsDBNull(20) ? null : reader.GetInt64(20),
+ MinPhysicalReads = reader.IsDBNull(21) ? null : reader.GetInt64(21),
+ MaxPhysicalReads = reader.IsDBNull(22) ? null : reader.GetInt64(22),
+ MinDop = reader.IsDBNull(23) ? null : reader.GetInt64(23),
+ MaxDop = reader.IsDBNull(24) ? null : reader.GetInt64(24),
+ AvgMemoryPages = reader.IsDBNull(25) ? null : reader.GetInt64(25),
+ MinMemoryPages = reader.IsDBNull(26) ? null : reader.GetInt64(26),
+ MaxMemoryPages = reader.IsDBNull(27) ? null : reader.GetInt64(27),
+ AvgRowcount = reader.IsDBNull(28) ? null : reader.GetInt64(28),
+ MinRowcount = reader.IsDBNull(29) ? null : reader.GetInt64(29),
+ MaxRowcount = reader.IsDBNull(30) ? null : reader.GetInt64(30),
+ AvgTempdbPages = reader.IsDBNull(31) ? null : reader.GetInt64(31),
+ MinTempdbPages = reader.IsDBNull(32) ? null : reader.GetInt64(32),
+ MaxTempdbPages = reader.IsDBNull(33) ? null : reader.GetInt64(33),
+ PlanType = reader.IsDBNull(34) ? null : reader.GetString(34),
+ IsForcedPlan = !reader.IsDBNull(35) && reader.GetByte(35) == 1,
+ CompatibilityLevel = reader.IsDBNull(36) ? null : reader.GetInt16(36),
+ QueryPlanHash = reader.IsDBNull(37) ? null : reader.GetString(37),
+ ForceFailureCount = reader.IsDBNull(38) ? null : reader.GetInt64(38),
+ LastForceFailureReasonDesc = reader.IsDBNull(39) ? null : reader.GetString(39),
+ PlanForcingType = reader.IsDBNull(40) ? null : reader.GetString(40),
+ MinClrTimeMs = reader.IsDBNull(41) ? null : Convert.ToDouble(reader.GetValue(41), CultureInfo.InvariantCulture),
+ MaxClrTimeMs = reader.IsDBNull(42) ? null : Convert.ToDouble(reader.GetValue(42), CultureInfo.InvariantCulture),
+ MinNumPhysicalIoReads = reader.IsDBNull(43) ? null : reader.GetInt64(43),
+ MaxNumPhysicalIoReads = reader.IsDBNull(44) ? null : reader.GetInt64(44),
+ MinLogBytesUsed = reader.IsDBNull(45) ? null : reader.GetInt64(45),
+ MaxLogBytesUsed = reader.IsDBNull(46) ? null : reader.GetInt64(46),
+ QuerySqlText = reader.IsDBNull(47) ? null : reader.GetString(47),
+ QueryPlanXml = null
+ });
+ }
+
+ return items;
+ }
}
}
diff --git a/Dashboard/Services/DatabaseService.cs b/Dashboard/Services/DatabaseService.cs
index afdd2e8d..5b403852 100644
--- a/Dashboard/Services/DatabaseService.cs
+++ b/Dashboard/Services/DatabaseService.cs
@@ -92,7 +92,8 @@ public static SqlConnectionStringBuilder BuildConnectionString(
string? password = null,
string encryptMode = "Mandatory",
bool trustServerCertificate = false,
- bool readOnlyIntent = false)
+ bool readOnlyIntent = false,
+ bool multiSubnetFailover = false)
{
var builder = new SqlConnectionStringBuilder
{
@@ -101,7 +102,8 @@ public static SqlConnectionStringBuilder BuildConnectionString(
TrustServerCertificate = trustServerCertificate,
IntegratedSecurity = useWindowsAuth,
MultipleActiveResultSets = true,
- ApplicationIntent = readOnlyIntent ? ApplicationIntent.ReadOnly : ApplicationIntent.ReadWrite
+ ApplicationIntent = readOnlyIntent ? ApplicationIntent.ReadOnly : ApplicationIntent.ReadWrite,
+ MultiSubnetFailover = multiSubnetFailover
};
// Set encryption mode
diff --git a/Dashboard/Services/PlanAnalyzer.cs b/Dashboard/Services/PlanAnalyzer.cs
index 6965bbb3..befa192c 100644
--- a/Dashboard/Services/PlanAnalyzer.cs
+++ b/Dashboard/Services/PlanAnalyzer.cs
@@ -38,10 +38,11 @@ public static void Analyze(ParsedPlan plan)
private static void AnalyzeStatement(PlanStatement stmt)
{
// Rule 3: Serial plan with reason
- // Skip: trivial cost (< 0.01), TRIVIAL optimization (can't go parallel anyway),
+ // Skip: cost < 1 (CTFP is an integer, so at any non-zero setting a sub-unit cost can't go parallel),
+ // TRIVIAL optimization (can't go parallel anyway),
// and 0ms actual elapsed time (not worth flagging).
if (!string.IsNullOrEmpty(stmt.NonParallelPlanReason)
- && stmt.StatementSubTreeCost >= 0.01
+ && stmt.StatementSubTreeCost >= 1.0
&& stmt.StatementOptmLevel != "TRIVIAL"
&& !(stmt.QueryTimeStats != null && stmt.QueryTimeStats.ElapsedTimeMs == 0))
{
@@ -105,12 +106,44 @@ private static void AnalyzeStatement(PlanStatement stmt)
or "NoParallelWithRemoteQuery"
or "NoRemoteParallelismForMatrix";
- stmt.PlanWarnings.Add(new PlanWarning
+ // MaxDOPSetToOne needs special handling: check whether the user explicitly
+ // set MAXDOP 1 in the query text, or if it's a server/db/RG setting.
+ // SQL Server truncates StatementText at ~4,000 characters in plan XML.
+ if (stmt.NonParallelPlanReason == "MaxDOPSetToOne")
{
- WarningType = "Serial Plan",
- Message = $"Query running serially: {reason}.",
- Severity = isActionable ? PlanWarningSeverity.Warning : PlanWarningSeverity.Info
- });
+ var text = stmt.StatementText ?? "";
+ var hasMaxdop1InText = Regex.IsMatch(text, @"MAXDOP\s+1\b", RegexOptions.IgnoreCase);
+ var isTruncated = text.Length >= 3990;
+
+ if (hasMaxdop1InText)
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Serial Plan",
+ Message = $"Query running serially: {reason}.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ else if (isTruncated)
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Serial Plan",
+ Message = $"Query running serially: {reason}. MAXDOP 1 may be set at the server, database, resource governor, or query level (query text was truncated).",
+ Severity = PlanWarningSeverity.Info
+ });
+ }
+ // else: not truncated, no MAXDOP 1 in text — server/db/RG setting, suppress entirely
+ }
+ else
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Serial Plan",
+ Message = $"Query running serially: {reason}.",
+ Severity = isActionable ? PlanWarningSeverity.Warning : PlanWarningSeverity.Info
+ });
+ }
}
// Rule 9: Memory grant issues (statement-level)
@@ -203,8 +236,8 @@ private static void AnalyzeStatement(PlanStatement stmt)
// Rule 20: Local variables without RECOMPILE
// Parameters with no CompiledValue are likely local variables — the optimizer
// cannot sniff their values and uses density-based ("unknown") estimates.
- // Skip trivial statements (simple variable assignments) where estimate quality doesn't matter.
- if (stmt.Parameters.Count > 0 && stmt.StatementSubTreeCost >= 0.01)
+ // Skip statements with cost < 1 (can't go parallel, estimate quality rarely matters).
+ if (stmt.Parameters.Count > 0 && stmt.StatementSubTreeCost >= 1.0)
{
var unsnifffedParams = stmt.Parameters
.Where(p => string.IsNullOrEmpty(p.CompiledValue))
@@ -259,28 +292,33 @@ private static void AnalyzeStatement(PlanStatement stmt)
var speedup = (double)cpu / elapsed;
var efficiency = Math.Max(0.0, Math.Min(100.0, (speedup - 1.0) / (dop - 1.0) * 100.0));
+ // Build targeted advice from wait stats if available
+ var waitAdvice = GetWaitStatsAdvice(stmt.WaitStats);
+
if (speedup < 0.5)
{
// CPU well below Elapsed: threads are waiting, not doing CPU work
var waitPct = (1.0 - speedup) * 100;
+ var advice = waitAdvice ?? "Common causes include spills to tempdb, physical I/O reads, lock or latch contention, and memory grant waits.";
stmt.PlanWarnings.Add(new PlanWarning
{
WarningType = "Parallel Wait Bottleneck",
Message = $"Parallel plan (DOP {dop}, {efficiency:N0}% efficient) with elapsed time ({elapsed:N0}ms) exceeding CPU time ({cpu:N0}ms). " +
$"Approximately {waitPct:N0}% of elapsed time was spent waiting rather than on CPU. " +
- $"Common causes include spills to tempdb, physical I/O reads, lock or latch contention, and memory grant waits.",
+ advice,
Severity = PlanWarningSeverity.Warning
});
}
else if (efficiency < 40)
{
// CPU >= Elapsed but well below DOP potential — parallelism is ineffective
+ var advice = waitAdvice ?? "Look for parallel thread skew, blocking exchanges, or serial zones in the plan that prevent effective parallel execution.";
stmt.PlanWarnings.Add(new PlanWarning
{
WarningType = "Ineffective Parallelism",
Message = $"Parallel plan (DOP {dop}) is only {efficiency:N0}% efficient — CPU time ({cpu:N0}ms) vs elapsed time ({elapsed:N0}ms). " +
$"At DOP {dop}, ideal CPU time would be ~{elapsed * dop:N0}ms. " +
- $"Look for parallel thread skew, blocking exchanges, or serial zones in the plan that prevent effective parallel execution.",
+ advice,
Severity = efficiency < 20 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
@@ -483,8 +521,11 @@ private static void AnalyzeNode(PlanNode node, PlanStatement stmt)
{
if (node.ActualRows == 0)
{
- // Zero rows is always worth noting — resources were allocated for nothing
- if (node.EstimateRows >= 100)
+ // Zero rows with a significant estimate — only warn on operators that
+ // actually allocate meaningful resources (memory grants for hash/sort/spool).
+ // Skip Parallelism, Bitmap, Compute Scalar, Filter, Concatenation, etc.
+ // where 0 rows is just a consequence of upstream filtering.
+ if (node.EstimateRows >= 100 && AllocatesResources(node))
{
node.Warnings.Add(new PlanWarning
{
@@ -670,14 +711,76 @@ _ when nonSargableReason.StartsWith("Function call", StringComparison.OrdinalIgn
!IsProbeOnly(node.Predicate))
{
var displayPredicate = StripProbeExpressions(node.Predicate);
+ var details = BuildScanImpactDetails(node, stmt);
+ var severity = PlanWarningSeverity.Warning;
+ if (details.CostPct >= 90 || details.ElapsedPct >= 90)
+ severity = PlanWarningSeverity.Critical;
+ var message = "Scan with residual predicate — SQL Server is reading every row and filtering after the fact.";
+ if (!string.IsNullOrEmpty(details.Summary))
+ message += $" {details.Summary}";
+ message += " Check that you have appropriate indexes.";
+ message += $"\nPredicate: {Truncate(displayPredicate, 200)}";
node.Warnings.Add(new PlanWarning
{
WarningType = "Scan With Predicate",
- Message = $"Scan with residual predicate — SQL Server is reading every row and filtering after the fact. Check that you have appropriate indexes.\nPredicate: {Truncate(displayPredicate, 200)}",
- Severity = PlanWarningSeverity.Warning
+ Message = message,
+ Severity = severity
});
}
+ // Rule 32: Cardinality misestimate on expensive scan — likely preventing index usage
+ // When a scan dominates the plan AND the estimate is vastly higher than actual rows,
+ // the optimizer chose a scan because it thought it needed most of the table.
+ // With accurate estimates, it would likely seek instead.
+ if (node.HasActualStats && IsRowstoreScan(node)
+ && node.EstimateRows > 0 && node.ActualRows >= 0 && node.ActualRowsRead > 0)
+ {
+ var impact = BuildScanImpactDetails(node, stmt);
+ var overestimateRatio = node.EstimateRows / Math.Max(1.0, node.ActualRows);
+ var selectivity = (double)node.ActualRows / node.ActualRowsRead;
+
+ // Fire when: scan is >= 50% of plan, estimate is >= 10x actual, and < 10% selectivity
+ if ((impact.CostPct >= 50 || impact.ElapsedPct >= 50)
+ && overestimateRatio >= 10.0
+ && selectivity < 0.10)
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Scan Cardinality Misestimate",
+ Message = $"Estimated {node.EstimateRows:N0} rows but only {node.ActualRows:N0} returned ({selectivity * 100:N3}% of {node.ActualRowsRead:N0} rows read). " +
+ $"The {overestimateRatio:N0}x overestimate likely caused the optimizer to choose a scan instead of a seek. " +
+ $"An index on the predicate columns could dramatically reduce I/O.",
+ Severity = PlanWarningSeverity.Critical
+ });
+ }
+ }
+
+ // Rule 33: Estimated plan CE guess detection — scans with telltale default selectivity
+ // When the optimizer uses a local variable or can't sniff, it falls back to density-based
+ // guesses: 30% (equality), 10% (inequality), 9% (LIKE/between), ~16.43% (30% x sqrt(30%) for two ANDed predicates),
+ // 1% (multi-inequality). On large tables, these guesses can hide the need for an index.
+ if (!node.HasActualStats && IsRowstoreScan(node)
+ && node.TableCardinality >= 100_000 && node.EstimateRows > 0
+ && !string.IsNullOrEmpty(node.Predicate))
+ {
+ var impact = BuildScanImpactDetails(node, stmt);
+ if (impact.CostPct >= 50)
+ {
+ var guessDesc = DetectCeGuess(node.EstimateRows, node.TableCardinality);
+ if (guessDesc != null)
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Estimated Plan CE Guess",
+ Message = $"Estimated {node.EstimateRows:N0} rows from {node.TableCardinality:N0} row table — {guessDesc}. " +
+ $"The optimizer may be using a default guess instead of accurate statistics. " +
+ $"If actual selectivity is much lower, an index on the predicate columns could help significantly.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+ }
+
// Rule 13: Mismatched data types (GetRangeWithMismatchedTypes / GetRangeThroughConvert)
if (node.PhysicalOp == "Compute Scalar" && !string.IsNullOrEmpty(node.DefinedValues))
{
@@ -1073,12 +1176,14 @@ private static bool IsScanOperator(PlanNode node)
if (IsNullCoalesceRegExp().IsMatch(predicate))
return "ISNULL/COALESCE wrapping column";
- // Common function calls on columns
+ // Common function calls on columns — but only if the function wraps a column,
+ // not a parameter/variable. Split on comparison operators to check which side
+ // the function is on. Predicate format: [db].[schema].[table].[col]>func(...)
var funcMatch = FunctionInPredicateRegex.Match(predicate);
if (funcMatch.Success)
{
var funcName = funcMatch.Groups[1].Value.ToUpperInvariant();
- if (funcName != "CONVERT_IMPLICIT")
+ if (funcName != "CONVERT_IMPLICIT" && IsFunctionOnColumnSide(predicate, funcMatch))
return $"Function call ({funcName}) on column";
}
@@ -1431,6 +1536,156 @@ private static string Truncate(string value, int maxLength)
return value.Length <= maxLength ? value : value[..maxLength] + "...";
}
+ /// <summary>
+ /// Returns targeted advice based on statement-level wait stats, or null if no waits.
+ /// When the dominant wait type is clear, gives specific guidance instead of generic advice.
+ /// </summary>
+ private static string? GetWaitStatsAdvice(List waits)
+ {
+ if (waits.Count == 0)
+ return null;
+
+ var totalMs = waits.Sum(w => w.WaitTimeMs);
+ if (totalMs == 0)
+ return null;
+
+ var top = waits.OrderByDescending(w => w.WaitTimeMs).First();
+ var topPct = (double)top.WaitTimeMs / totalMs * 100;
+
+ // Only give targeted advice if the dominant wait is >= 80% of total wait time
+ if (topPct < 80)
+ return null;
+
+ var waitType = top.WaitType.ToUpperInvariant();
+ var advice = waitType switch
+ {
+ _ when waitType.StartsWith("PAGEIOLATCH", StringComparison.Ordinal) =>
+ $"I/O bound — {topPct:N0}% of wait time is {top.WaitType}. Data is being read from disk rather than memory. Consider adding indexes to reduce I/O, or investigate memory pressure.",
+ _ when waitType.StartsWith("LATCH_", StringComparison.Ordinal) =>
+ $"Latch contention — {topPct:N0}% of wait time is {top.WaitType}.",
+ _ when waitType.StartsWith("LCK_", StringComparison.Ordinal) =>
+ $"Lock contention — {topPct:N0}% of wait time is {top.WaitType}. Other sessions are holding locks that this query needs.",
+ _ when waitType.StartsWith("CXPACKET", StringComparison.Ordinal) || waitType.StartsWith("CXCONSUMER", StringComparison.Ordinal) =>
+ $"Parallel thread skew — {topPct:N0}% of wait time is {top.WaitType}. Work is unevenly distributed across parallel threads.",
+ _ when waitType.Contains("IO_COMPLETION", StringComparison.Ordinal) =>
+ $"I/O bound — {topPct:N0}% of wait time is {top.WaitType}.",
+ _ when waitType.StartsWith("RESOURCE_SEMAPHORE", StringComparison.Ordinal) =>
+ $"Memory grant wait — {topPct:N0}% of wait time is {top.WaitType}. The query had to wait for a memory grant.",
+ _ => $"Dominant wait is {top.WaitType} ({topPct:N0}% of wait time)."
+ };
+
+ return advice;
+ }
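+ // Worked example with illustrative numbers: waits of PAGEIOLATCH_SH 900ms and CXPACKET 100ms
+ // give topPct = 90, clearing the 80% bar, so the I/O-bound message is returned; a 60/40 split
+ // has no dominant wait and returns null, leaving callers on their generic advice.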
+
+ /// <summary>
+ /// Returns true for operators that allocate meaningful resources based on row estimates.
+ /// Hash Match (hash table), Sort (sort buffer), Spool (worktable).
+ /// </summary>
+ private static bool AllocatesResources(PlanNode node)
+ {
+ var op = node.PhysicalOp;
+ return op.StartsWith("Hash", StringComparison.OrdinalIgnoreCase)
+ || op.StartsWith("Sort", StringComparison.OrdinalIgnoreCase)
+ || op.EndsWith("Spool", StringComparison.OrdinalIgnoreCase);
+ }
+
+ private record ScanImpact(double CostPct, double ElapsedPct, string? Summary);
+
+ /// <summary>
+ /// Builds impact details for a scan node: what % of plan time/cost it represents,
+ /// and what fraction of rows survived filtering.
+ /// </summary>
+ private static ScanImpact BuildScanImpactDetails(PlanNode node, PlanStatement stmt)
+ {
+ var parts = new List();
+
+ // % of plan cost
+ double costPct = 0;
+ if (stmt.StatementSubTreeCost > 0 && node.EstimatedTotalSubtreeCost > 0)
+ {
+ costPct = node.EstimatedTotalSubtreeCost / stmt.StatementSubTreeCost * 100;
+ if (costPct >= 50)
+ parts.Add($"This scan is {costPct:N0}% of the plan cost.");
+ }
+
+ // % of elapsed time (actual plans)
+ double elapsedPct = 0;
+ if (node.HasActualStats && node.ActualElapsedMs > 0 &&
+ stmt.QueryTimeStats != null && stmt.QueryTimeStats.ElapsedTimeMs > 0)
+ {
+ elapsedPct = (double)node.ActualElapsedMs / stmt.QueryTimeStats.ElapsedTimeMs * 100;
+ if (elapsedPct >= 50)
+ parts.Add($"This scan took {elapsedPct:N0}% of elapsed time.");
+ }
+
+ // Row selectivity: rows returned vs rows read (actual) or vs table cardinality (estimated)
+ if (node.HasActualStats && node.ActualRowsRead > 0 && node.ActualRows < node.ActualRowsRead)
+ {
+ var selectivity = (double)node.ActualRows / node.ActualRowsRead * 100;
+ if (selectivity < 10)
+ parts.Add($"Only {selectivity:N3}% of rows survived filtering ({node.ActualRows:N0} of {node.ActualRowsRead:N0}).");
+ }
+ else if (!node.HasActualStats && node.TableCardinality > 0 && node.EstimateRows < node.TableCardinality)
+ {
+ var selectivity = node.EstimateRows / node.TableCardinality * 100;
+ if (selectivity < 10)
+ parts.Add($"Only {selectivity:N1}% of rows estimated to survive filtering.");
+ }
+
+ return new ScanImpact(costPct, elapsedPct, parts.Count > 0 ? string.Join(" ", parts) : null);
+ }
+
+ /// <summary>
+ /// Checks whether a function call in a predicate is on the column side of the comparison.
+ /// Predicate ScalarStrings look like: [db].[schema].[table].[col]>dateadd(day,(0),[@var])
+ /// If the function is only on the parameter/literal side, it's still SARGable.
+ /// </summary>
+ private static bool IsFunctionOnColumnSide(string predicate, Match funcMatch)
+ {
+ // Find the comparison operator that splits the predicate into left/right sides.
+ // Operators in ScalarString: >=, <=, <>, >, <, =
+ var compMatch = Regex.Match(predicate, @"(?<![<>=!])([<>=!]{1,2})(?![<>=])");
+ if (!compMatch.Success)
+ return true; // No comparison found — can't determine side, assume worst case
+
+ var compPos = compMatch.Index;
+ var funcPos = funcMatch.Index;
+
+ // Determine which side the function is on
+ var funcSide = funcPos < compPos ? "left" : "right";
+
+ // Check if that side also contains a column reference [...].[...].[...]
+ string side = funcSide == "left"
+ ? predicate[..compPos]
+ : predicate[(compPos + compMatch.Length)..];
+
+ // Column references are multi-part bracket-qualified: [schema].[table].[column]
+ // Variables are [@var] or [@var] — single bracket pair with @ prefix.
+ // Match [identifier].[identifier] (at least two dotted parts) to distinguish columns.
+ return Regex.IsMatch(side, @"\[[^\]@]+\]\.\[");
+ }
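+ // Illustrative predicates: in "[d].[s].[t].[col]>dateadd(day,(1),[@start])" the function sits on
+ // the parameter side, so this returns false and the predicate stays SARGable; in
+ // "datepart(year,[d].[s].[t].[col])=(2024)" the function wraps the column and this returns true.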
+
+ /// <summary>
+ /// Detects well-known CE default selectivity guesses by comparing EstimateRows to TableCardinality.
+ /// Returns a description of the guess pattern, or null if no known pattern matches.
+ /// </summary>
+ private static string? DetectCeGuess(double estimateRows, double tableCardinality)
+ {
+ if (tableCardinality <= 0) return null;
+ var selectivity = estimateRows / tableCardinality;
+
+ // Known CE guess selectivities with a 2% tolerance band
+ return selectivity switch
+ {
+ >= 0.29 and <= 0.31 => $"matches the 30% equality guess ({selectivity * 100:N1}%)",
+ >= 0.098 and <= 0.102 => $"matches the 10% inequality guess ({selectivity * 100:N1}%)",
+ >= 0.088 and <= 0.092 => $"matches the 9% LIKE/BETWEEN guess ({selectivity * 100:N1}%)",
+ >= 0.155 and <= 0.175 => $"matches the ~16.4% compound predicate guess ({selectivity * 100:N1}%)",
+ >= 0.009 and <= 0.011 => $"matches the 1% multi-inequality guess ({selectivity * 100:N1}%)",
+ _ => null
+ };
+ }
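+ // Worked example against a 1,000,000-row table (numbers illustrative): estimates of 300,000,
+ // 100,000, 90,000, ~164,300, and 10,000 rows land in the 30%, 10%, 9%, ~16.4%, and 1% bands
+ // respectively; anything outside the tolerance bands returns null.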
+
[GeneratedRegex(@"\b(CONVERT_IMPLICIT|CONVERT|CAST|isnull|coalesce|datepart|datediff|dateadd|year|month|day|upper|lower|ltrim|rtrim|trim|substring|left|right|charindex|replace|len|datalength|abs|floor|ceiling|round|reverse|stuff|format)\s*\(", RegexOptions.IgnoreCase)]
private static partial Regex FunctionInPredicateRegExp();
[GeneratedRegex(@"\blike\b[^'""]*?N?'%", RegexOptions.IgnoreCase)]
diff --git a/Dashboard/Services/ShowPlanParser.cs b/Dashboard/Services/ShowPlanParser.cs
index f441db9a..d99793ec 100644
--- a/Dashboard/Services/ShowPlanParser.cs
+++ b/Dashboard/Services/ShowPlanParser.cs
@@ -37,8 +37,9 @@ public static ParsedPlan Parse(string xml)
foreach (var batchEl in batches)
{
var batch = new PlanBatch();
- var statementsEl = batchEl.Element(Ns + "Statements");
- if (statementsEl != null)
+ // A Batch can contain multiple <Statements> elements (e.g., DECLARE + SELECT).
+ // Use Elements() to iterate all of them, not just the first.
+ foreach (var statementsEl in batchEl.Elements(Ns + "Statements"))
{
foreach (var stmtEl in statementsEl.Elements())
{
@@ -204,7 +205,27 @@ private static List ParseStatementAndChildren(XElement stmtEl)
}
}
- if (queryPlanEl == null) return stmt;
+ if (queryPlanEl == null)
+ {
+ // Statements with no QueryPlan (e.g., DECLARE/ASSIGN) still get a synthetic
+ // root node so they appear in the statement tab list.
+ var stmtType = stmt.StatementType.Length > 0
+ ? stmt.StatementType.ToUpperInvariant()
+ : "STATEMENT";
+ stmt.RootNode = new PlanNode
+ {
+ NodeId = -1,
+ PhysicalOp = stmtType,
+ LogicalOp = stmtType,
+ IconName = stmtType switch
+ {
+ "ASSIGN" => "assign",
+ "DECLARE" => "declare",
+ _ => "language_construct_catch_all"
+ }
+ };
+ return stmt;
+ }
ParseStmtAttributes(stmt, stmtEl);
ParseQueryPlanElements(stmt, stmtEl, queryPlanEl);
diff --git a/Installer.Core/DependencyInstaller.cs b/Installer.Core/DependencyInstaller.cs
index 13aad3cb..b2ac49e8 100644
--- a/Installer.Core/DependencyInstaller.cs
+++ b/Installer.Core/DependencyInstaller.cs
@@ -14,23 +14,31 @@ namespace Installer.Core;
/// <summary>
/// Installs community dependencies (sp_WhoIsActive, DarlingData, First Responder Kit)
-/// from GitHub. Requires an HttpClient — create one instance and dispose when done.
+/// from a local community/ directory or GitHub. Local files are checked first — if
+/// present, the network is not used. This supports air-gapped installations.
/// </summary>
public sealed class DependencyInstaller : IDisposable
{
private readonly HttpClient _httpClient;
+ private readonly string? _communityDirectory;
private bool _disposed;
- public DependencyInstaller()
+ /// <summary>
+ /// Optional path to a community/ directory containing pre-downloaded SQL files.
+ /// When provided and files exist, they are used instead of downloading from GitHub.
+ /// </summary>
+ public DependencyInstaller(string? communityDirectory = null)
{
_httpClient = new HttpClient
{
Timeout = TimeSpan.FromSeconds(30)
};
+ _communityDirectory = communityDirectory;
}
/// <summary>
- /// Install community dependencies from GitHub into the PerformanceMonitor database.
+ /// Install community dependencies into the PerformanceMonitor database.
+ /// Checks the community/ directory first, falls back to GitHub download.
/// Returns the number of successfully installed dependencies.
/// </summary>
public async Task<int> InstallDependenciesAsync(
@@ -38,21 +46,24 @@ public async Task<int> InstallDependenciesAsync(
IProgress<InstallationProgress>? progress = null,
CancellationToken cancellationToken = default)
{
- var dependencies = new List<(string Name, string Url, string Description)>
+ var dependencies = new List<(string Name, string Url, string LocalFile, string Description)>
{
(
"sp_WhoIsActive",
"https://raw.githubusercontent.com/amachanic/sp_whoisactive/refs/heads/master/sp_WhoIsActive.sql",
+ "sp_WhoIsActive.sql",
"Query activity monitoring by Adam Machanic (GPLv3)"
),
(
"DarlingData",
"https://raw.githubusercontent.com/erikdarlingdata/DarlingData/main/Install-All/DarlingData.sql",
+ "DarlingData.sql",
"sp_HealthParser, sp_HumanEventsBlockViewer by Erik Darling (MIT)"
),
(
"First Responder Kit",
"https://raw.githubusercontent.com/BrentOzarULTD/SQL-Server-First-Responder-Kit/refs/heads/main/Install-All-Scripts.sql",
+ "Install-All-Scripts.sql",
"sp_BlitzLock and diagnostic tools by Brent Ozar Unlimited (MIT)"
)
};
@@ -65,7 +76,7 @@ public async Task<int> InstallDependenciesAsync(
int successCount = 0;
- foreach (var (name, url, description) in dependencies)
+ foreach (var (name, url, localFile, description) in dependencies)
{
cancellationToken.ThrowIfCancellationRequested();
@@ -78,15 +89,40 @@ public async Task<int> InstallDependenciesAsync(
try
{
var depSw = Stopwatch.StartNew();
- progress?.Report(new InstallationProgress { Message = $"[DEBUG] Downloading {name} from {url}", Status = "Debug" });
- string sql = await DownloadWithRetryAsync(url, progress, cancellationToken: cancellationToken).ConfigureAwait(false);
- progress?.Report(new InstallationProgress { Message = $"[DEBUG] {name}: downloaded {sql.Length} chars in {depSw.ElapsedMilliseconds}ms", Status = "Debug" });
+ string sql;
+
+ /* Check community/ directory first */
+ string? localPath = ResolveLocalFile(localFile);
+ if (localPath != null)
+ {
+ progress?.Report(new InstallationProgress
+ {
+ Message = $"[DEBUG] {name}: loading from {localPath}",
+ Status = "Debug"
+ });
+ sql = await File.ReadAllTextAsync(localPath, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ progress?.Report(new InstallationProgress
+ {
+ Message = $"[DEBUG] Downloading {name} from {url}",
+ Status = "Debug"
+ });
+ sql = await DownloadWithRetryAsync(url, progress, cancellationToken: cancellationToken).ConfigureAwait(false);
+ }
+
+ progress?.Report(new InstallationProgress
+ {
+ Message = $"[DEBUG] {name}: {(localPath != null ? "loaded" : "downloaded")} {sql.Length} chars in {depSw.ElapsedMilliseconds}ms",
+ Status = "Debug"
+ });
if (string.IsNullOrWhiteSpace(sql))
{
progress?.Report(new InstallationProgress
{
- Message = $"{name} - FAILED (empty response)",
+ Message = $"{name} - FAILED (empty {(localPath != null ? "file" : "response")})",
Status = "Error"
});
continue;
@@ -115,9 +151,10 @@ public async Task<int> InstallDependenciesAsync(
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
}
+ string source = localPath != null ? "local" : "GitHub";
progress?.Report(new InstallationProgress
{
- Message = $"{name} - Success ({description})",
+ Message = $"{name} - Success ({description}) [{source}]",
Status = "Success"
});
@@ -158,6 +195,19 @@ public async Task<int> InstallDependenciesAsync(
return successCount;
}
+ /// <summary>
+ /// Checks the community directory for a local copy of the dependency file.
+ /// Returns the full path if found, null otherwise.
+ /// </summary>
+ private string? ResolveLocalFile(string fileName)
+ {
+ if (string.IsNullOrEmpty(_communityDirectory) || !Directory.Exists(_communityDirectory))
+ return null;
+
+ string path = Path.Combine(_communityDirectory, fileName);
+ return File.Exists(path) ? path : null;
+ }
+
private async Task<string> DownloadWithRetryAsync(
string url,
IProgress<InstallationProgress>? progress = null,
diff --git a/Installer.Core/Installer.Core.csproj b/Installer.Core/Installer.Core.csproj
index fbf0387b..bb5f1f32 100644
--- a/Installer.Core/Installer.Core.csproj
+++ b/Installer.Core/Installer.Core.csproj
@@ -7,10 +7,10 @@
Installer.Core
Installer.Core
SQL Server Performance Monitor Installer Core
- 2.6.0
- 2.6.0.0
- 2.6.0.0
- 2.6.0
+ 2.7.0
+ 2.7.0.0
+ 2.7.0.0
+ 2.7.0
Darling Data, LLC
Copyright (c) 2026 Darling Data, LLC
true
diff --git a/Installer.Core/ScriptProvider.cs b/Installer.Core/ScriptProvider.cs
index b136226a..40f1823e 100644
--- a/Installer.Core/ScriptProvider.cs
+++ b/Installer.Core/ScriptProvider.cs
@@ -152,7 +152,7 @@ protected static List FilterUpgrades(
return candidates
.Where(x => x.FromVersion != null && x.ToVersion != null)
- .Where(x => x.FromVersion >= current)
+ .Where(x => x.ToVersion > current)
.Where(x => x.ToVersion <= target)
.OrderBy(x => x.FromVersion)
.ToList();
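+ /* Worked example of the boundary this fixes (versions illustrative): with current = 2.4.1 and
+    target = 2.6.0, the 2.4.0-to-2.5.0 step was dropped by the old FromVersion >= current filter
+    (2.4.0 < 2.4.1); ToVersion 2.5.0 > current now keeps it, while 2.3.0-to-2.4.0 stays excluded
+    because its ToVersion 2.4.0 is not greater than current. */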
diff --git a/Installer.Tests/UpgradeOrderingTests.cs b/Installer.Tests/UpgradeOrderingTests.cs
index 20400707..1c5aef21 100644
--- a/Installer.Tests/UpgradeOrderingTests.cs
+++ b/Installer.Tests/UpgradeOrderingTests.cs
@@ -148,6 +148,23 @@ public void DoesNotIncludeFutureUpgrades()
Assert.DoesNotContain(upgrades, u => u.FolderName == "2.2.0-to-2.3.0");
}
+ [Fact]
+ public void PatchVersion_GetsUpgradeFromPriorMinor()
+ {
+ // Regression test for #817: user on v2.4.1 should still get the
+ // 2.4.0-to-2.5.0 upgrade applied (patch version within range)
+ using var dir = new TempDirectoryBuilder()
+ .WithUpgrade("2.3.0", "2.4.0", "01_a.sql")
+ .WithUpgrade("2.4.0", "2.5.0", "01_b.sql")
+ .WithUpgrade("2.5.0", "2.6.0", "01_c.sql");
+
+ var upgrades = ScriptProvider.FromDirectory(dir.RootPath).GetApplicableUpgrades("2.4.1", "2.6.0");
+
+ Assert.Equal(2, upgrades.Count);
+ Assert.Equal("2.4.0-to-2.5.0", upgrades[0].FolderName);
+ Assert.Equal("2.5.0-to-2.6.0", upgrades[1].FolderName);
+ }
+
[Fact]
public void EmbeddedResources_FindsUpgradeFolders()
{
diff --git a/Installer/PerformanceMonitorInstaller.csproj b/Installer/PerformanceMonitorInstaller.csproj
index 7c3fa1a9..a8293d15 100644
--- a/Installer/PerformanceMonitorInstaller.csproj
+++ b/Installer/PerformanceMonitorInstaller.csproj
@@ -20,10 +20,10 @@
PerformanceMonitorInstaller
SQL Server Performance Monitor Installer
- 2.6.0
- 2.6.0.0
- 2.6.0.0
- 2.6.0
+ 2.7.0
+ 2.7.0.0
+ 2.7.0.0
+ 2.7.0
Darling Data, LLC
Copyright © 2026 Darling Data, LLC
Installation utility for SQL Server Performance Monitor - Supports SQL Server 2016-2025
diff --git a/Installer/Program.cs b/Installer/Program.cs
index bbb487a5..dd89646b 100644
--- a/Installer/Program.cs
+++ b/Installer/Program.cs
@@ -636,7 +636,8 @@ Execute SQL files in order
Execute installation using Installer.Core
Use DependencyInstaller for community dependencies before validation
*/
- using var dependencyInstaller = new DependencyInstaller();
+ string communityDir = Path.Combine(monitorRootDirectory, "community");
+ using var dependencyInstaller = new DependencyInstaller(communityDir);
var installResult = await InstallationService.ExecuteInstallationAsync(
connectionString,
diff --git a/InstallerGui/MainWindow.xaml.cs b/InstallerGui/MainWindow.xaml.cs
index 6103e949..1f532c89 100644
--- a/InstallerGui/MainWindow.xaml.cs
+++ b/InstallerGui/MainWindow.xaml.cs
@@ -60,7 +60,8 @@ public MainWindow()
try
{
InitializeComponent();
- _dependencyInstaller = new DependencyInstaller();
+ string communityDir = System.IO.Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "community");
+ _dependencyInstaller = new DependencyInstaller(communityDir);
/*Set window title with version*/
Title = $"Performance Monitor Installer v{AppVersion}";
diff --git a/Lite/Controls/FinOpsTab.xaml b/Lite/Controls/FinOpsTab.xaml
index 1cae8045..ae2fc7e3 100644
--- a/Lite/Controls/FinOpsTab.xaml
+++ b/Lite/Controls/FinOpsTab.xaml
@@ -2234,11 +2234,19 @@
+
+
+
+
+
+
+
+
-
+
diff --git a/Lite/Controls/ServerTab.xaml.cs b/Lite/Controls/ServerTab.xaml.cs
index 4370176b..6035727d 100644
--- a/Lite/Controls/ServerTab.xaml.cs
+++ b/Lite/Controls/ServerTab.xaml.cs
@@ -153,16 +153,7 @@ public ServerTab(ServerConnection server, DuckDbInitializer duckDb, CredentialSe
};
_refreshTimer.Tick += async (s, e) =>
{
- if (_isRefreshing) return;
- _isRefreshing = true;
- try
- {
- await RefreshAllDataAsync(fullRefresh: false);
- }
- finally
- {
- _isRefreshing = false;
- }
+ await RefreshAllDataAsync(fullRefresh: false);
};
_refreshTimer.Start();
diff --git a/Lite/Mcp/McpQueryTools.cs b/Lite/Mcp/McpQueryTools.cs
index 8299248e..907d3bfb 100644
--- a/Lite/Mcp/McpQueryTools.cs
+++ b/Lite/Mcp/McpQueryTools.cs
@@ -33,19 +33,17 @@ public static async Task GetTopQueriesByCpu(
var topError = McpHelpers.ValidateTop(top, "top");
if (topError != null) return topError;
- var rows = await dataService.GetTopQueriesByCpuAsync(resolved.Value.ServerId, hours_back, top * 5);
+ var rows = await dataService.GetTopQueriesByCpuAsync(resolved.Value.ServerId, hours_back, top, databaseName: database_name);
if (rows.Count == 0)
{
return "No query stats available for the specified time range.";
}
IEnumerable filtered = rows;
- if (!string.IsNullOrEmpty(database_name))
- filtered = filtered.Where(r => string.Equals(r.DatabaseName, database_name, StringComparison.OrdinalIgnoreCase));
if (parallel_only || min_dop > 1)
filtered = filtered.Where(r => r.MaxDop > 1 && r.MaxDop >= (min_dop > 1 ? min_dop : 2));
- var result = filtered.Take(top).Select(r => new
+ var result = filtered.Select(r => new
{
database_name = r.DatabaseName,
query_hash = r.QueryHash,
@@ -109,17 +107,13 @@ public static async Task GetTopProceduresByCpu(
var topError = McpHelpers.ValidateTop(top, "top");
if (topError != null) return topError;
- var rows = await dataService.GetTopProceduresByCpuAsync(resolved.Value.ServerId, hours_back, top * 5);
+ var rows = await dataService.GetTopProceduresByCpuAsync(resolved.Value.ServerId, hours_back, top, databaseName: database_name);
if (rows.Count == 0)
{
return "No procedure stats available. Delta-based collection requires at least two collection cycles (~30 minutes) to produce non-zero values.";
}
- IEnumerable filtered = rows;
- if (!string.IsNullOrEmpty(database_name))
- filtered = filtered.Where(r => string.Equals(r.DatabaseName, database_name, StringComparison.OrdinalIgnoreCase));
-
- var result = filtered.Take(top).Select(r => new
+ var result = rows.Select(r => new
{
database_name = r.DatabaseName,
full_name = r.FullName,
@@ -178,17 +172,13 @@ public static async Task GetQueryStoreTop(
var topError = McpHelpers.ValidateTop(top, "top");
if (topError != null) return topError;
- var rows = await dataService.GetQueryStoreTopQueriesAsync(resolved.Value.ServerId, hours_back, top * 5);
+ var rows = await dataService.GetQueryStoreTopQueriesAsync(resolved.Value.ServerId, hours_back, top, databaseName: database_name);
if (rows.Count == 0)
{
return "No Query Store data available. Query Store may not be enabled on target databases.";
}
- IEnumerable filtered = rows;
- if (!string.IsNullOrEmpty(database_name))
- filtered = filtered.Where(r => string.Equals(r.DatabaseName, database_name, StringComparison.OrdinalIgnoreCase));
-
- var result = filtered.Take(top).Select(r => new
+ var result = rows.Select(r => new
{
database_name = r.DatabaseName,
query_id = r.QueryId,
diff --git a/Lite/Models/ServerConnection.cs b/Lite/Models/ServerConnection.cs
index f89ea109..5f347a92 100644
--- a/Lite/Models/ServerConnection.cs
+++ b/Lite/Models/ServerConnection.cs
@@ -89,6 +89,12 @@ public bool UseWindowsAuth
/// </summary>
public bool ReadOnlyIntent { get; set; } = false;
+ /// <summary>
+ /// When true, sets MultiSubnetFailover=true on the connection string.
+ /// Recommended for AG listeners and FCIs spanning multiple subnets.
+ /// </summary>
+ public bool MultiSubnetFailover { get; set; } = false;
+
/// <summary>
/// Server name with "(Read-Only)" suffix when ReadOnlyIntent is enabled.
/// Used for sidebar subtitle and status text.
@@ -205,7 +211,8 @@ private string BuildConnectionString(string? username, string? password)
CommandTimeout = 60,
TrustServerCertificate = TrustServerCertificate,
MultipleActiveResultSets = true,
- ApplicationIntent = ReadOnlyIntent ? ApplicationIntent.ReadOnly : ApplicationIntent.ReadWrite
+ ApplicationIntent = ReadOnlyIntent ? ApplicationIntent.ReadOnly : ApplicationIntent.ReadWrite,
+ MultiSubnetFailover = MultiSubnetFailover
};
// Set encryption mode
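
For a concrete picture of what the new property does, here is a minimal sketch of the builder with both AG-related options turned on. Only the property names mirror the diff; the listener name and the remaining values are placeholders:

```csharp
using System;
using Microsoft.Data.SqlClient;

var builder = new SqlConnectionStringBuilder
{
    DataSource = "ag-listener.example.com",          // placeholder AG listener
    InitialCatalog = "master",
    IntegratedSecurity = true,
    TrustServerCertificate = true,
    MultipleActiveResultSets = true,
    ApplicationIntent = ApplicationIntent.ReadOnly,  // ReadOnlyIntent == true
    MultiSubnetFailover = true                       // the new 2.7.0 option
};

// The resulting connection string enables read-only routing and tells the driver to
// attempt connections to every IP the listener resolves to, instead of trying them
// one at a time, which shortens reconnect time after a multi-subnet failover.
Console.WriteLine(builder.ConnectionString);
```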
diff --git a/Lite/PerformanceMonitorLite.csproj b/Lite/PerformanceMonitorLite.csproj
index d87dde0a..784cd81b 100644
--- a/Lite/PerformanceMonitorLite.csproj
+++ b/Lite/PerformanceMonitorLite.csproj
@@ -8,10 +8,10 @@
PerformanceMonitorLite
PerformanceMonitorLite
SQL Server Performance Monitor Lite
- 2.6.0
- 2.6.0.0
- 2.6.0.0
- 2.6.0
+ 2.7.0
+ 2.7.0.0
+ 2.7.0.0
+ 2.7.0
Darling Data, LLC
Copyright © 2026 Darling Data, LLC
Lightweight SQL Server performance monitoring - no installation required on target servers
diff --git a/Lite/Services/LocalDataService.FinOps.cs b/Lite/Services/LocalDataService.FinOps.cs
index 73bdbe50..c2290f06 100644
--- a/Lite/Services/LocalDataService.FinOps.cs
+++ b/Lite/Services/LocalDataService.FinOps.cs
@@ -93,10 +93,23 @@ WHEN CONVERT(int, SERVERPROPERTY('EngineEdition')) = 5
THEN N'SELECT @gb = SUM(CAST(size AS bigint)) * 8.0 / 1024.0 / 1024.0 FROM sys.database_files'
ELSE N'SELECT @gb = SUM(CAST(size AS bigint)) * 8.0 / 1024.0 / 1024.0 FROM sys.master_files'
END,
- @storage_gb decimal(19,2);
+ @storage_gb decimal(19,2),
+ @host_os nvarchar(256);
EXEC sys.sp_executesql @storage_sql, N'@gb decimal(19,2) OUTPUT', @gb = @storage_gb OUTPUT;
+IF OBJECT_ID(N'sys.dm_os_host_info', N'V') IS NOT NULL
+ EXEC sys.sp_executesql N'SELECT @os = host_distribution FROM sys.dm_os_host_info',
+ N'@os nvarchar(256) OUTPUT', @os = @host_os OUTPUT;
+
+IF @host_os IS NULL
+BEGIN
+ DECLARE @ver nvarchar(4000) = @@VERSION;
+ DECLARE @on_pos int = CHARINDEX(N' on ', @ver);
+ IF @on_pos > 0
+ SET @host_os = LTRIM(SUBSTRING(@ver, @on_pos + 4, LEN(@ver)));
+END;
+
SELECT
CONVERT(nvarchar(256), SERVERPROPERTY('Edition')),
CONVERT(nvarchar(128), SERVERPROPERTY('ProductVersion')),
@@ -110,7 +123,8 @@ WHEN CONVERT(int, SERVERPROPERTY('EngineEdition')) = 5
si.cores_per_socket,
CONVERT(int, SERVERPROPERTY('EngineEdition')),
CONVERT(int, SERVERPROPERTY('IsHadrEnabled')),
- CONVERT(int, SERVERPROPERTY('IsClustered'))
+ CONVERT(int, SERVERPROPERTY('IsClustered')),
+ @host_os
FROM sys.dm_os_sys_info AS si;";
using var command = new SqlCommand(query, connection) { CommandTimeout = 30 };
@@ -137,6 +151,7 @@ WHEN CONVERT(int, SERVERPROPERTY('EngineEdition')) = 5
EngineEdition = reader.IsDBNull(10) ? 0 : Convert.ToInt32(reader.GetValue(10)),
IsHadrEnabled = reader.IsDBNull(11) ? null : Convert.ToInt32(reader.GetValue(11)) == 1,
IsClustered = reader.IsDBNull(12) ? null : Convert.ToInt32(reader.GetValue(12)) == 1,
+ HostOsVersion = reader.IsDBNull(13) ? "" : reader.GetString(13),
LastUpdated = DateTime.Now
};
}
@@ -2295,6 +2310,7 @@ public class ServerPropertyRow
public string ServerName { get; set; } = "";
public string Edition { get; set; } = "";
public string ProductVersion { get; set; } = "";
+ public string HostOsVersion { get; set; } = "";
public string? ProductLevel { get; set; }
public string? ProductUpdateLevel { get; set; }
public int EngineEdition { get; set; }
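
The fallback branch is plain string surgery: everything after the first " on " in @@VERSION becomes the host OS. The same logic as a standalone C# sketch, purely for illustration; the shipped code does this in T-SQL with CHARINDEX, SUBSTRING, and LTRIM:

```csharp
using System;

static string? ParseHostOsFromVersion(string version)
{
    // Mirrors CHARINDEX(N' on ', @ver) followed by SUBSTRING(@ver, @on_pos + 4, ...) and LTRIM.
    const string marker = " on ";
    var pos = version.IndexOf(marker, StringComparison.Ordinal);
    return pos > 0 ? version[(pos + marker.Length)..].TrimStart() : null;
}

// Made-up @@VERSION-style string, just to show where the split lands.
var sample = "Microsoft SQL Server 2022 (RTM) (X64) on Windows Server 2022 Standard 10.0";
Console.WriteLine(ParseHostOsFromVersion(sample) ?? "unknown"); // "Windows Server 2022 Standard 10.0"
```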
diff --git a/Lite/Services/LocalDataService.QueryStats.cs b/Lite/Services/LocalDataService.QueryStats.cs
index 4822c0af..f2df4f28 100644
--- a/Lite/Services/LocalDataService.QueryStats.cs
+++ b/Lite/Services/LocalDataService.QueryStats.cs
@@ -83,7 +83,7 @@ GROUP BY date_trunc('hour', collection_time)
return items;
}
- public async Task> GetTopQueriesByCpuAsync(int serverId, int hoursBack = 24, int top = 50, DateTime? fromDate = null, DateTime? toDate = null, int utcOffsetMinutes = 0)
+ public async Task> GetTopQueriesByCpuAsync(int serverId, int hoursBack = 24, int top = 50, DateTime? fromDate = null, DateTime? toDate = null, int utcOffsetMinutes = 0, string? databaseName = null)
{
using var _q = TimeQuery("GetTopQueriesByCpuAsync", "v_query_stats top N by CPU");
using var connection = await OpenConnectionAsync();
@@ -92,47 +92,65 @@ public async Task> GetTopQueriesByCpuAsync(int serverId, int
var (startTime, endTime) = GetTimeRange(hoursBack, fromDate, toDate);
command.CommandText = @"
+WITH ranked AS (
+ SELECT
+ database_name,
+ query_hash,
+ MAX(last_execution_time) AS last_execution_time,
+ MAX(creation_time) AS creation_time,
+ SUM(delta_execution_count) AS total_executions,
+ SUM(delta_worker_time) AS total_cpu_us,
+ SUM(delta_elapsed_time) AS total_elapsed_us,
+ SUM(delta_logical_reads) AS total_reads,
+ SUM(delta_rows) AS total_rows,
+ SUM(delta_logical_writes) AS total_writes,
+ SUM(delta_physical_reads) AS total_physical_reads,
+ SUM(delta_spills) AS total_spills,
+ MIN(min_dop) AS min_dop,
+ MAX(max_dop) AS max_dop,
+ MIN(min_worker_time) AS min_worker_time,
+ MAX(max_worker_time) AS max_worker_time,
+ MIN(min_elapsed_time) AS min_elapsed_time,
+ MAX(max_elapsed_time) AS max_elapsed_time,
+ MIN(min_physical_reads) AS min_physical_reads,
+ MAX(max_physical_reads) AS max_physical_reads,
+ MIN(min_rows) AS min_rows,
+ MAX(max_rows) AS max_rows,
+ MIN(min_grant_kb) AS min_grant_kb,
+ MAX(max_grant_kb) AS max_grant_kb,
+ MIN(min_spills) AS min_spills,
+ MAX(max_spills) AS max_spills,
+ MAX(query_plan_hash) AS query_plan_hash,
+ MAX(sql_handle) AS sql_handle,
+ MAX(plan_handle) AS plan_handle
+ FROM v_query_stats
+ WHERE server_id = $1
+ AND collection_time >= $2
+ AND collection_time <= $3
+ AND last_execution_time >= $2 + $5 * INTERVAL '1' MINUTE
+ AND ($6 IS NULL OR database_name = $6)
+ GROUP BY database_name, query_hash
+ HAVING SUM(delta_execution_count) > 0 OR SUM(delta_elapsed_time) > 0
+ ORDER BY SUM(delta_elapsed_time) DESC
+ LIMIT $4 + 5
+)
SELECT
- database_name,
- query_hash,
- MAX(last_execution_time) AS last_execution_time,
- MAX(creation_time) AS creation_time,
- SUM(delta_execution_count) AS total_executions,
- SUM(delta_worker_time) AS total_cpu_us,
- SUM(delta_elapsed_time) AS total_elapsed_us,
- SUM(delta_logical_reads) AS total_reads,
- SUM(delta_rows) AS total_rows,
- SUM(delta_logical_writes) AS total_writes,
- SUM(delta_physical_reads) AS total_physical_reads,
- SUM(delta_spills) AS total_spills,
- MIN(min_dop) AS min_dop,
- MAX(max_dop) AS max_dop,
- MIN(min_worker_time) AS min_worker_time,
- MAX(max_worker_time) AS max_worker_time,
- MIN(min_elapsed_time) AS min_elapsed_time,
- MAX(max_elapsed_time) AS max_elapsed_time,
- MIN(min_physical_reads) AS min_physical_reads,
- MAX(max_physical_reads) AS max_physical_reads,
- MIN(min_rows) AS min_rows,
- MAX(max_rows) AS max_rows,
- MIN(min_grant_kb) AS min_grant_kb,
- MAX(max_grant_kb) AS max_grant_kb,
- MIN(min_spills) AS min_spills,
- MAX(max_spills) AS max_spills,
- MAX(query_plan_hash) AS query_plan_hash,
- MAX(sql_handle) AS sql_handle,
- MAX(plan_handle) AS plan_handle,
- MAX(query_text) AS query_text,
- MAX(query_plan_xml) AS query_plan
-FROM v_query_stats
-WHERE server_id = $1
-AND collection_time >= $2
-AND collection_time <= $3
-AND last_execution_time >= $2 + $5 * INTERVAL '1' MINUTE
-AND query_text NOT LIKE 'WAITFOR%'
-GROUP BY database_name, query_hash
-HAVING SUM(delta_execution_count) > 0 OR SUM(delta_elapsed_time) > 0
-ORDER BY SUM(delta_elapsed_time) DESC
+ r.*,
+ t.query_text,
+ t.query_plan_xml AS query_plan
+FROM ranked r
+LEFT JOIN LATERAL (
+ SELECT query_text, query_plan_xml
+ FROM v_query_stats
+ WHERE server_id = $1
+ AND query_hash = r.query_hash
+ AND database_name = r.database_name
+ AND query_text IS NOT NULL
+ ORDER BY collection_time DESC
+ LIMIT 1
+) t ON TRUE
+WHERE t.query_text IS NULL OR t.query_text NOT LIKE 'WAITFOR%'
+ORDER BY r.total_elapsed_us DESC
LIMIT $4";
command.Parameters.Add(new DuckDBParameter { Value = serverId });
@@ -140,6 +158,7 @@ ORDER BY SUM(delta_elapsed_time) DESC
command.Parameters.Add(new DuckDBParameter { Value = endTime });
command.Parameters.Add(new DuckDBParameter { Value = top });
command.Parameters.Add(new DuckDBParameter { Value = utcOffsetMinutes });
+ command.Parameters.Add(new DuckDBParameter { Value = (object?)databaseName ?? DBNull.Value });
var items = new List();
using var reader = await command.ExecuteReaderAsync();
@@ -599,7 +618,7 @@ GROUP BY date_trunc('hour', collection_time)
return items;
}
- public async Task> GetTopProceduresByCpuAsync(int serverId, int hoursBack = 24, int top = 50, DateTime? fromDate = null, DateTime? toDate = null, int utcOffsetMinutes = 0)
+ public async Task> GetTopProceduresByCpuAsync(int serverId, int hoursBack = 24, int top = 50, DateTime? fromDate = null, DateTime? toDate = null, int utcOffsetMinutes = 0, string? databaseName = null)
{
using var _q = TimeQuery("GetTopProceduresByCpuAsync", "v_procedure_stats top N by CPU");
using var connection = await OpenConnectionAsync();
@@ -641,6 +660,7 @@ FROM v_procedure_stats
AND collection_time >= $2
AND collection_time <= $3
AND last_execution_time >= $2 + $5 * INTERVAL '1' MINUTE
+AND ($6 IS NULL OR database_name = $6)
GROUP BY database_name, schema_name, object_name, object_type
HAVING SUM(delta_execution_count) > 0 OR SUM(delta_elapsed_time) > 0
ORDER BY SUM(delta_elapsed_time) DESC
@@ -651,6 +671,7 @@ ORDER BY SUM(delta_elapsed_time) DESC
command.Parameters.Add(new DuckDBParameter { Value = endTime });
command.Parameters.Add(new DuckDBParameter { Value = top });
command.Parameters.Add(new DuckDBParameter { Value = utcOffsetMinutes });
+ command.Parameters.Add(new DuckDBParameter { Value = (object?)databaseName ?? DBNull.Value });
var items = new List();
using var reader = await command.ExecuteReaderAsync();
diff --git a/Lite/Services/LocalDataService.QueryStore.cs b/Lite/Services/LocalDataService.QueryStore.cs
index b19df4bf..63851528 100644
--- a/Lite/Services/LocalDataService.QueryStore.cs
+++ b/Lite/Services/LocalDataService.QueryStore.cs
@@ -67,7 +67,7 @@ GROUP BY date_trunc('hour', collection_time)
return items;
}
- public async Task> GetQueryStoreTopQueriesAsync(int serverId, int hoursBack = 24, int top = 50, DateTime? fromDate = null, DateTime? toDate = null)
+ public async Task> GetQueryStoreTopQueriesAsync(int serverId, int hoursBack = 24, int top = 50, DateTime? fromDate = null, DateTime? toDate = null, string? databaseName = null)
{
using var _q = TimeQuery("GetQueryStoreTopQueriesAsync", "v_query_store_stats top N");
using var connection = await OpenConnectionAsync();
@@ -76,49 +76,94 @@ public async Task> GetQueryStoreTopQueriesAsync(int serverId
var (startTime, endTime) = GetTimeRange(hoursBack, fromDate, toDate);
command.CommandText = @"
+WITH ranked AS (
+ SELECT
+ database_name,
+ query_id,
+ plan_id,
+ query_hash,
+ MAX(module_name) AS module_name,
+ SUM(execution_count) AS total_executions,
+ AVG(CAST(avg_duration_us AS DOUBLE)) / 1000.0 AS avg_duration_ms,
+ AVG(CAST(avg_cpu_time_us AS DOUBLE)) / 1000.0 AS avg_cpu_time_ms,
+ AVG(CAST(avg_logical_io_reads AS DOUBLE)) AS avg_logical_reads,
+ AVG(CAST(avg_logical_io_writes AS DOUBLE)) AS avg_logical_writes,
+ AVG(CAST(avg_physical_io_reads AS DOUBLE)) AS avg_physical_reads,
+ AVG(CAST(avg_rowcount AS DOUBLE)) AS avg_rowcount,
+ MIN(min_dop) AS min_dop,
+ MAX(max_dop) AS max_dop,
+ MAX(last_execution_time) AS last_execution_time,
+ MAX(query_plan_hash) AS query_plan_hash,
+ MAX(CASE WHEN is_forced_plan THEN TRUE ELSE FALSE END) AS is_forced_plan,
+ MAX(plan_forcing_type) AS plan_forcing_type,
+ MAX(execution_type_desc) AS execution_type_desc,
+ MIN(first_execution_time) AS first_execution_time,
+ AVG(CAST(avg_clr_time_us AS DOUBLE)) / 1000.0 AS avg_clr_time_ms,
+ AVG(CAST(avg_tempdb_space_used AS DOUBLE)) AS avg_tempdb_space_used,
+ AVG(CAST(avg_log_bytes_used AS DOUBLE)) AS avg_log_bytes_used,
+ MAX(plan_type) AS plan_type,
+ MAX(force_failure_count) AS force_failure_count,
+ MAX(last_force_failure_reason) AS last_force_failure_reason,
+ MAX(compatibility_level) AS compatibility_level
+ FROM v_query_store_stats
+ WHERE server_id = $1
+ AND collection_time >= $2
+ AND collection_time <= $3
+ AND ($5 IS NULL OR database_name = $5)
+ GROUP BY database_name, query_id, plan_id, query_hash
+ ORDER BY SUM(execution_count) * AVG(CAST(avg_duration_us AS DOUBLE)) DESC
+ LIMIT $4 + 5
+)
SELECT
- database_name,
- query_id,
- plan_id,
- query_hash,
- MAX(query_text) AS query_text,
- MAX(module_name) AS module_name,
- SUM(execution_count) AS total_executions,
- AVG(CAST(avg_duration_us AS DOUBLE)) / 1000.0 AS avg_duration_ms,
- AVG(CAST(avg_cpu_time_us AS DOUBLE)) / 1000.0 AS avg_cpu_time_ms,
- AVG(CAST(avg_logical_io_reads AS DOUBLE)) AS avg_logical_reads,
- AVG(CAST(avg_logical_io_writes AS DOUBLE)) AS avg_logical_writes,
- AVG(CAST(avg_physical_io_reads AS DOUBLE)) AS avg_physical_reads,
- AVG(CAST(avg_rowcount AS DOUBLE)) AS avg_rowcount,
- MIN(min_dop) AS min_dop,
- MAX(max_dop) AS max_dop,
- MAX(last_execution_time) AS last_execution_time,
- MAX(query_plan_hash) AS query_plan_hash,
- MAX(CASE WHEN is_forced_plan THEN TRUE ELSE FALSE END) AS is_forced_plan,
- MAX(plan_forcing_type) AS plan_forcing_type,
+ r.database_name,
+ r.query_id,
+ r.plan_id,
+ r.query_hash,
+ t.query_text,
+ r.module_name,
+ r.total_executions,
+ r.avg_duration_ms,
+ r.avg_cpu_time_ms,
+ r.avg_logical_reads,
+ r.avg_logical_writes,
+ r.avg_physical_reads,
+ r.avg_rowcount,
+ r.min_dop,
+ r.max_dop,
+ r.last_execution_time,
+ r.query_plan_hash,
+ r.is_forced_plan,
+ r.plan_forcing_type,
NULL AS query_plan_text,
- MAX(execution_type_desc) AS execution_type_desc,
- MIN(first_execution_time) AS first_execution_time,
- AVG(CAST(avg_clr_time_us AS DOUBLE)) / 1000.0 AS avg_clr_time_ms,
- AVG(CAST(avg_tempdb_space_used AS DOUBLE)) AS avg_tempdb_space_used,
- AVG(CAST(avg_log_bytes_used AS DOUBLE)) AS avg_log_bytes_used,
- MAX(plan_type) AS plan_type,
- MAX(force_failure_count) AS force_failure_count,
- MAX(last_force_failure_reason) AS last_force_failure_reason,
- MAX(compatibility_level) AS compatibility_level
-FROM v_query_store_stats
-WHERE server_id = $1
-AND collection_time >= $2
-AND collection_time <= $3
-AND query_text NOT LIKE 'WAITFOR%'
-GROUP BY database_name, query_id, plan_id, query_hash
-ORDER BY SUM(execution_count) * AVG(CAST(avg_duration_us AS DOUBLE)) DESC
+ r.execution_type_desc,
+ r.first_execution_time,
+ r.avg_clr_time_ms,
+ r.avg_tempdb_space_used,
+ r.avg_log_bytes_used,
+ r.plan_type,
+ r.force_failure_count,
+ r.last_force_failure_reason,
+ r.compatibility_level
+FROM ranked r
+LEFT JOIN LATERAL (
+ SELECT query_text
+ FROM v_query_store_stats
+ WHERE server_id = $1
+ AND query_id = r.query_id
+ AND database_name = r.database_name
+ AND query_text IS NOT NULL
+ ORDER BY collection_time DESC
+ LIMIT 1
+) t ON TRUE
+WHERE t.query_text IS NULL OR t.query_text NOT LIKE 'WAITFOR%'
+ORDER BY r.total_executions * r.avg_duration_ms DESC
LIMIT $4";
command.Parameters.Add(new DuckDBParameter { Value = serverId });
command.Parameters.Add(new DuckDBParameter { Value = startTime });
command.Parameters.Add(new DuckDBParameter { Value = endTime });
command.Parameters.Add(new DuckDBParameter { Value = top });
+ command.Parameters.Add(new DuckDBParameter { Value = (object?)databaseName ?? DBNull.Value });
var items = new List();
using var reader = await command.ExecuteReaderAsync();
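
Both GetTopQueriesByCpuAsync and GetQueryStoreTopQueriesAsync now use the same optional-filter idiom: the SQL reads `($n IS NULL OR database_name = $n)` and the C# side binds `DBNull.Value` when no database was requested, so a single query covers the filtered and unfiltered cases. A stripped-down sketch of the binding side, assuming DuckDB.NET.Data and a throwaway table name:

```csharp
using System;
using DuckDB.NET.Data;

using var connection = new DuckDBConnection("Data Source=:memory:");
connection.Open();

using var command = connection.CreateCommand();
command.CommandText = @"
SELECT database_name, SUM(delta_worker_time) AS total_cpu_us
FROM query_stats_demo              -- placeholder table, not the real view
WHERE server_id = $1
AND ($2 IS NULL OR database_name = $2)
GROUP BY database_name";

string? databaseName = null; // null means no filter; a name restricts to that database
command.Parameters.Add(new DuckDBParameter { Value = 1 });
command.Parameters.Add(new DuckDBParameter { Value = (object?)databaseName ?? DBNull.Value });
```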
diff --git a/Lite/Services/PlanAnalyzer.cs b/Lite/Services/PlanAnalyzer.cs
index 0ead490a..866def3b 100644
--- a/Lite/Services/PlanAnalyzer.cs
+++ b/Lite/Services/PlanAnalyzer.cs
@@ -38,10 +38,11 @@ public static void Analyze(ParsedPlan plan)
private static void AnalyzeStatement(PlanStatement stmt)
{
// Rule 3: Serial plan with reason
- // Skip: trivial cost (< 0.01), TRIVIAL optimization (can't go parallel anyway),
+ // Skip: cost < 1 (CTFP is an integer so cost < 1 can never go parallel),
+ // TRIVIAL optimization (can't go parallel anyway),
// and 0ms actual elapsed time (not worth flagging).
if (!string.IsNullOrEmpty(stmt.NonParallelPlanReason)
- && stmt.StatementSubTreeCost >= 0.01
+ && stmt.StatementSubTreeCost >= 1.0
&& stmt.StatementOptmLevel != "TRIVIAL"
&& !(stmt.QueryTimeStats != null && stmt.QueryTimeStats.ElapsedTimeMs == 0))
{
@@ -105,12 +106,44 @@ private static void AnalyzeStatement(PlanStatement stmt)
or "NoParallelWithRemoteQuery"
or "NoRemoteParallelismForMatrix";
- stmt.PlanWarnings.Add(new PlanWarning
+ // MaxDOPSetToOne needs special handling: check whether the user explicitly
+ // set MAXDOP 1 in the query text, or if it's a server/db/RG setting.
+ // SQL Server truncates StatementText at ~4,000 characters in plan XML.
+ if (stmt.NonParallelPlanReason == "MaxDOPSetToOne")
{
- WarningType = "Serial Plan",
- Message = $"Query running serially: {reason}.",
- Severity = isActionable ? PlanWarningSeverity.Warning : PlanWarningSeverity.Info
- });
+ var text = stmt.StatementText ?? "";
+ var hasMaxdop1InText = Regex.IsMatch(text, @"MAXDOP\s+1\b", RegexOptions.IgnoreCase);
+ var isTruncated = text.Length >= 3990;
+
+ if (hasMaxdop1InText)
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Serial Plan",
+ Message = $"Query running serially: {reason}.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ else if (isTruncated)
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Serial Plan",
+ Message = $"Query running serially: {reason}. MAXDOP 1 may be set at the server, database, resource governor, or query level (query text was truncated).",
+ Severity = PlanWarningSeverity.Info
+ });
+ }
+ // else: not truncated, no MAXDOP 1 in text — server/db/RG setting, suppress entirely
+ }
+ else
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Serial Plan",
+ Message = $"Query running serially: {reason}.",
+ Severity = isActionable ? PlanWarningSeverity.Warning : PlanWarningSeverity.Info
+ });
+ }
}
// Rule 9: Memory grant issues (statement-level)
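
The text check hinges on one small regex, so it is worth seeing what it does and does not match. A quick self-contained demonstration with made-up query text:

```csharp
using System;
using System.Text.RegularExpressions;

var maxdop1 = new Regex(@"MAXDOP\s+1\b", RegexOptions.IgnoreCase);

Console.WriteLine(maxdop1.IsMatch("SELECT ... OPTION (MAXDOP 1)"));  // True: explicit hint in the text
Console.WriteLine(maxdop1.IsMatch("SELECT ... OPTION (MAXDOP 16)")); // False: \b keeps 16 from matching as 1
Console.WriteLine(maxdop1.IsMatch("SELECT * FROM dbo.Orders"));      // False: nothing in the text, so the
                                                                     // serial reason must come from a
                                                                     // server/db/RG setting
```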
@@ -203,8 +236,8 @@ private static void AnalyzeStatement(PlanStatement stmt)
// Rule 20: Local variables without RECOMPILE
// Parameters with no CompiledValue are likely local variables — the optimizer
// cannot sniff their values and uses density-based ("unknown") estimates.
- // Skip trivial statements (simple variable assignments) where estimate quality doesn't matter.
- if (stmt.Parameters.Count > 0 && stmt.StatementSubTreeCost >= 0.01)
+ // Skip statements with cost < 1 (can't go parallel, estimate quality rarely matters).
+ if (stmt.Parameters.Count > 0 && stmt.StatementSubTreeCost >= 1.0)
{
var unsnifffedParams = stmt.Parameters
.Where(p => string.IsNullOrEmpty(p.CompiledValue))
@@ -259,28 +292,33 @@ private static void AnalyzeStatement(PlanStatement stmt)
var speedup = (double)cpu / elapsed;
var efficiency = Math.Max(0.0, Math.Min(100.0, (speedup - 1.0) / (dop - 1.0) * 100.0));
+ // Build targeted advice from wait stats if available
+ var waitAdvice = GetWaitStatsAdvice(stmt.WaitStats);
+
if (speedup < 0.5)
{
// CPU well below Elapsed: threads are waiting, not doing CPU work
var waitPct = (1.0 - speedup) * 100;
+ var advice = waitAdvice ?? "Common causes include spills to tempdb, physical I/O reads, lock or latch contention, and memory grant waits.";
stmt.PlanWarnings.Add(new PlanWarning
{
WarningType = "Parallel Wait Bottleneck",
Message = $"Parallel plan (DOP {dop}, {efficiency:N0}% efficient) with elapsed time ({elapsed:N0}ms) exceeding CPU time ({cpu:N0}ms). " +
$"Approximately {waitPct:N0}% of elapsed time was spent waiting rather than on CPU. " +
- $"Common causes include spills to tempdb, physical I/O reads, lock or latch contention, and memory grant waits.",
+ advice,
Severity = PlanWarningSeverity.Warning
});
}
else if (efficiency < 40)
{
// CPU >= Elapsed but well below DOP potential — parallelism is ineffective
+ var advice = waitAdvice ?? "Look for parallel thread skew, blocking exchanges, or serial zones in the plan that prevent effective parallel execution.";
stmt.PlanWarnings.Add(new PlanWarning
{
WarningType = "Ineffective Parallelism",
Message = $"Parallel plan (DOP {dop}) is only {efficiency:N0}% efficient — CPU time ({cpu:N0}ms) vs elapsed time ({elapsed:N0}ms). " +
$"At DOP {dop}, ideal CPU time would be ~{elapsed * dop:N0}ms. " +
- $"Look for parallel thread skew, blocking exchanges, or serial zones in the plan that prevent effective parallel execution.",
+ advice,
Severity = efficiency < 20 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
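
To keep the two thresholds straight, here is the same arithmetic with made-up numbers for a plan that dodges the wait-bottleneck branch but still lands in the critical ineffective-parallelism bucket:

```csharp
using System;

double cpu = 1_200;      // ms of CPU time summed across threads (made up)
double elapsed = 1_000;  // ms of wall-clock time (made up)
int dop = 8;

double speedup = cpu / elapsed;                              // 1.2
double efficiency = Math.Max(0.0, Math.Min(100.0,
    (speedup - 1.0) / (dop - 1.0) * 100.0));                 // ~2.9

// speedup >= 0.5, so "Parallel Wait Bottleneck" does not fire, but efficiency is
// below 40 (and below 20), so "Ineffective Parallelism" fires at Critical severity:
// eight threads bought almost no speedup over a serial run.
Console.WriteLine($"speedup={speedup:N2}, efficiency={efficiency:N0}%");
```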
@@ -483,8 +521,11 @@ private static void AnalyzeNode(PlanNode node, PlanStatement stmt)
{
if (node.ActualRows == 0)
{
- // Zero rows is always worth noting — resources were allocated for nothing
- if (node.EstimateRows >= 100)
+ // Zero rows with a significant estimate — only warn on operators that
+ // actually allocate meaningful resources (memory grants for hash/sort/spool).
+ // Skip Parallelism, Bitmap, Compute Scalar, Filter, Concatenation, etc.
+ // where 0 rows is just a consequence of upstream filtering.
+ if (node.EstimateRows >= 100 && AllocatesResources(node))
{
node.Warnings.Add(new PlanWarning
{
@@ -670,14 +711,76 @@ _ when nonSargableReason.StartsWith("Function call", StringComparison.OrdinalIgn
!IsProbeOnly(node.Predicate))
{
var displayPredicate = StripProbeExpressions(node.Predicate);
+ var details = BuildScanImpactDetails(node, stmt);
+ var severity = PlanWarningSeverity.Warning;
+ if (details.CostPct >= 90 || details.ElapsedPct >= 90)
+ severity = PlanWarningSeverity.Critical;
+ var message = "Scan with residual predicate — SQL Server is reading every row and filtering after the fact.";
+ if (!string.IsNullOrEmpty(details.Summary))
+ message += $" {details.Summary}";
+ message += " Check that you have appropriate indexes.";
+ message += $"\nPredicate: {Truncate(displayPredicate, 200)}";
node.Warnings.Add(new PlanWarning
{
WarningType = "Scan With Predicate",
- Message = $"Scan with residual predicate — SQL Server is reading every row and filtering after the fact. Check that you have appropriate indexes.\nPredicate: {Truncate(displayPredicate, 200)}",
- Severity = PlanWarningSeverity.Warning
+ Message = message,
+ Severity = severity
});
}
+ // Rule 32: Cardinality misestimate on expensive scan — likely preventing index usage
+ // When a scan dominates the plan AND the estimate is vastly higher than actual rows,
+ // the optimizer chose a scan because it thought it needed most of the table.
+ // With accurate estimates, it would likely seek instead.
+ if (node.HasActualStats && IsRowstoreScan(node)
+ && node.EstimateRows > 0 && node.ActualRows >= 0 && node.ActualRowsRead > 0)
+ {
+ var impact = BuildScanImpactDetails(node, stmt);
+ var overestimateRatio = node.EstimateRows / Math.Max(1.0, node.ActualRows);
+ var selectivity = (double)node.ActualRows / node.ActualRowsRead;
+
+ // Fire when: scan is >= 50% of plan, estimate is >= 10x actual, and < 10% selectivity
+ if ((impact.CostPct >= 50 || impact.ElapsedPct >= 50)
+ && overestimateRatio >= 10.0
+ && selectivity < 0.10)
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Scan Cardinality Misestimate",
+ Message = $"Estimated {node.EstimateRows:N0} rows but only {node.ActualRows:N0} returned ({selectivity * 100:N3}% of {node.ActualRowsRead:N0} rows read). " +
+ $"The {overestimateRatio:N0}x overestimate likely caused the optimizer to choose a scan instead of a seek. " +
+ $"An index on the predicate columns could dramatically reduce I/O.",
+ Severity = PlanWarningSeverity.Critical
+ });
+ }
+ }
+
+ // Rule 33: Estimated plan CE guess detection — scans with telltale default selectivity
+ // When the optimizer uses a local variable or can't sniff, it falls back to density-based
+ // guesses: 30% (equality), 10% (inequality), 9% (LIKE/between), ~16.43% (30% * sqrt(30%)),
+ // 1% (multi-inequality). On large tables, these guesses can hide the need for an index.
+ if (!node.HasActualStats && IsRowstoreScan(node)
+ && node.TableCardinality >= 100_000 && node.EstimateRows > 0
+ && !string.IsNullOrEmpty(node.Predicate))
+ {
+ var impact = BuildScanImpactDetails(node, stmt);
+ if (impact.CostPct >= 50)
+ {
+ var guessDesc = DetectCeGuess(node.EstimateRows, node.TableCardinality);
+ if (guessDesc != null)
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Estimated Plan CE Guess",
+ Message = $"Estimated {node.EstimateRows:N0} rows from {node.TableCardinality:N0} row table — {guessDesc}. " +
+ $"The optimizer may be using a default guess instead of accurate statistics. " +
+ $"If actual selectivity is much lower, an index on the predicate columns could help significantly.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+ }
+
// Rule 13: Mismatched data types (GetRangeWithMismatchedTypes / GetRangeThroughConvert)
if (node.PhysicalOp == "Compute Scalar" && !string.IsNullOrEmpty(node.DefinedValues))
{
@@ -1072,12 +1175,14 @@ private static bool IsScanOperator(PlanNode node)
if (IsNullCoalesceRegExp().IsMatch(predicate))
return "ISNULL/COALESCE wrapping column";
- // Common function calls on columns
+ // Common function calls on columns — but only if the function wraps a column,
+ // not a parameter/variable. Split on comparison operators to check which side
+ // the function is on. Predicate format: [db].[schema].[table].[col]>func(...)
var funcMatch = FunctionInPredicateRegex.Match(predicate);
if (funcMatch.Success)
{
var funcName = funcMatch.Groups[1].Value.ToUpperInvariant();
- if (funcName != "CONVERT_IMPLICIT")
+ if (funcName != "CONVERT_IMPLICIT" && IsFunctionOnColumnSide(predicate, funcMatch))
return $"Function call ({funcName}) on column";
}
@@ -1430,6 +1535,156 @@ private static string Truncate(string value, int maxLength)
return value.Length <= maxLength ? value : value[..maxLength] + "...";
}
+ /// <summary>
+ /// Returns targeted advice based on statement-level wait stats, or null if no waits.
+ /// When the dominant wait type is clear, gives specific guidance instead of generic advice.
+ /// </summary>
+ private static string? GetWaitStatsAdvice(List waits)
+ {
+ if (waits.Count == 0)
+ return null;
+
+ var totalMs = waits.Sum(w => w.WaitTimeMs);
+ if (totalMs == 0)
+ return null;
+
+ var top = waits.OrderByDescending(w => w.WaitTimeMs).First();
+ var topPct = (double)top.WaitTimeMs / totalMs * 100;
+
+ // Only give targeted advice if the dominant wait is >= 80% of total wait time
+ if (topPct < 80)
+ return null;
+
+ var waitType = top.WaitType.ToUpperInvariant();
+ var advice = waitType switch
+ {
+ _ when waitType.StartsWith("PAGEIOLATCH", StringComparison.Ordinal) =>
+ $"I/O bound — {topPct:N0}% of wait time is {top.WaitType}. Data is being read from disk rather than memory. Consider adding indexes to reduce I/O, or investigate memory pressure.",
+ _ when waitType.StartsWith("LATCH_", StringComparison.Ordinal) =>
+ $"Latch contention — {topPct:N0}% of wait time is {top.WaitType}.",
+ _ when waitType.StartsWith("LCK_", StringComparison.Ordinal) =>
+ $"Lock contention — {topPct:N0}% of wait time is {top.WaitType}. Other sessions are holding locks that this query needs.",
+ _ when waitType.StartsWith("CXPACKET", StringComparison.Ordinal) || waitType.StartsWith("CXCONSUMER", StringComparison.Ordinal) =>
+ $"Parallel thread skew — {topPct:N0}% of wait time is {top.WaitType}. Work is unevenly distributed across parallel threads.",
+ _ when waitType.Contains("IO_COMPLETION", StringComparison.Ordinal) =>
+ $"I/O bound — {topPct:N0}% of wait time is {top.WaitType}.",
+ _ when waitType.StartsWith("RESOURCE_SEMAPHORE", StringComparison.Ordinal) =>
+ $"Memory grant wait — {topPct:N0}% of wait time is {top.WaitType}. The query had to wait for a memory grant.",
+ _ => $"Dominant wait is {top.WaitType} ({topPct:N0}% of wait time)."
+ };
+
+ return advice;
+ }
+
+ /// <summary>
+ /// Returns true for operators that allocate meaningful resources based on row estimates.
+ /// Hash Match (hash table), Sort (sort buffer), Spool (worktable).
+ /// </summary>
+ private static bool AllocatesResources(PlanNode node)
+ {
+ var op = node.PhysicalOp;
+ return op.StartsWith("Hash", StringComparison.OrdinalIgnoreCase)
+ || op.StartsWith("Sort", StringComparison.OrdinalIgnoreCase)
+ || op.EndsWith("Spool", StringComparison.OrdinalIgnoreCase);
+ }
+
+ private record ScanImpact(double CostPct, double ElapsedPct, string? Summary);
+
+ /// <summary>
+ /// Builds impact details for a scan node: what % of plan time/cost it represents,
+ /// and what fraction of rows survived filtering.
+ /// </summary>
+ private static ScanImpact BuildScanImpactDetails(PlanNode node, PlanStatement stmt)
+ {
+ var parts = new List();
+
+ // % of plan cost
+ double costPct = 0;
+ if (stmt.StatementSubTreeCost > 0 && node.EstimatedTotalSubtreeCost > 0)
+ {
+ costPct = node.EstimatedTotalSubtreeCost / stmt.StatementSubTreeCost * 100;
+ if (costPct >= 50)
+ parts.Add($"This scan is {costPct:N0}% of the plan cost.");
+ }
+
+ // % of elapsed time (actual plans)
+ double elapsedPct = 0;
+ if (node.HasActualStats && node.ActualElapsedMs > 0 &&
+ stmt.QueryTimeStats != null && stmt.QueryTimeStats.ElapsedTimeMs > 0)
+ {
+ elapsedPct = (double)node.ActualElapsedMs / stmt.QueryTimeStats.ElapsedTimeMs * 100;
+ if (elapsedPct >= 50)
+ parts.Add($"This scan took {elapsedPct:N0}% of elapsed time.");
+ }
+
+ // Row selectivity: rows returned vs rows read (actual) or vs table cardinality (estimated)
+ if (node.HasActualStats && node.ActualRowsRead > 0 && node.ActualRows < node.ActualRowsRead)
+ {
+ var selectivity = (double)node.ActualRows / node.ActualRowsRead * 100;
+ if (selectivity < 10)
+ parts.Add($"Only {selectivity:N3}% of rows survived filtering ({node.ActualRows:N0} of {node.ActualRowsRead:N0}).");
+ }
+ else if (!node.HasActualStats && node.TableCardinality > 0 && node.EstimateRows < node.TableCardinality)
+ {
+ var selectivity = node.EstimateRows / node.TableCardinality * 100;
+ if (selectivity < 10)
+ parts.Add($"Only {selectivity:N1}% of rows estimated to survive filtering.");
+ }
+
+ return new ScanImpact(costPct, elapsedPct, parts.Count > 0 ? string.Join(" ", parts) : null);
+ }
+
+ /// <summary>
+ /// Checks whether a function call in a predicate is on the column side of the comparison.
+ /// Predicate ScalarStrings look like: [db].[schema].[table].[col]>dateadd(day,(0),[@var])
+ /// If the function is only on the parameter/literal side, it's still SARGable.
+ /// </summary>
+ private static bool IsFunctionOnColumnSide(string predicate, Match funcMatch)
+ {
+ // Find the comparison operator that splits the predicate into left/right sides.
+ // Operators in ScalarString: >=, <=, <>, >, <, =
+ var compMatch = Regex.Match(predicate, @"(?<![<>=])([<>=!]{1,2})(?![<>=])");
+ if (!compMatch.Success)
+ return true; // No comparison found — can't determine side, assume worst case
+
+ var compPos = compMatch.Index;
+ var funcPos = funcMatch.Index;
+
+ // Determine which side the function is on
+ var funcSide = funcPos < compPos ? "left" : "right";
+
+ // Check if that side also contains a column reference [...].[...].[...]
+ string side = funcSide == "left"
+ ? predicate[..compPos]
+ : predicate[(compPos + compMatch.Length)..];
+
+ // Column references are multi-part bracket-qualified: [schema].[table].[column]
+ // Variables appear as a single bracket pair with an @ prefix, e.g. [@var].
+ // Match [identifier].[identifier] (at least two dotted parts) to distinguish columns.
+ return Regex.IsMatch(side, @"\[[^\]@]+\]\.\[");
+ }
+
+ /// <summary>
+ /// Detects well-known CE default selectivity guesses by comparing EstimateRows to TableCardinality.
+ /// Returns a description of the guess pattern, or null if no known pattern matches.
+ /// </summary>
+ private static string? DetectCeGuess(double estimateRows, double tableCardinality)
+ {
+ if (tableCardinality <= 0) return null;
+ var selectivity = estimateRows / tableCardinality;
+
+ // Known CE guess selectivities with a 2% tolerance band
+ return selectivity switch
+ {
+ >= 0.29 and <= 0.31 => $"matches the 30% equality guess ({selectivity * 100:N1}%)",
+ >= 0.098 and <= 0.102 => $"matches the 10% inequality guess ({selectivity * 100:N1}%)",
+ >= 0.088 and <= 0.092 => $"matches the 9% LIKE/BETWEEN guess ({selectivity * 100:N1}%)",
+ >= 0.155 and <= 0.175 => $"matches the ~16.4% compound predicate guess ({selectivity * 100:N1}%)",
+ >= 0.009 and <= 0.011 => $"matches the 1% multi-inequality guess ({selectivity * 100:N1}%)",
+ _ => null
+ };
+ }
+
[GeneratedRegex(@"\b(CONVERT_IMPLICIT|CONVERT|CAST|isnull|coalesce|datepart|datediff|dateadd|year|month|day|upper|lower|ltrim|rtrim|trim|substring|left|right|charindex|replace|len|datalength|abs|floor|ceiling|round|reverse|stuff|format)\s*\(", RegexOptions.IgnoreCase)]
private static partial Regex FunctionInPredicateRegExp();
[GeneratedRegex(@"\blike\b[^'""]*?N?'%", RegexOptions.IgnoreCase)]
diff --git a/Lite/Services/ShowPlanParser.cs b/Lite/Services/ShowPlanParser.cs
index 1e825e94..c9060e5d 100644
--- a/Lite/Services/ShowPlanParser.cs
+++ b/Lite/Services/ShowPlanParser.cs
@@ -37,8 +37,9 @@ public static ParsedPlan Parse(string xml)
foreach (var batchEl in batches)
{
var batch = new PlanBatch();
- var statementsEl = batchEl.Element(Ns + "Statements");
- if (statementsEl != null)
+ // A Batch can contain multiple <Statements> elements (e.g., DECLARE + SELECT).
+ // Use Elements() to iterate all of them, not just the first.
+ foreach (var statementsEl in batchEl.Elements(Ns + "Statements"))
{
foreach (var stmtEl in statementsEl.Elements())
{
@@ -204,7 +205,27 @@ private static List ParseStatementAndChildren(XElement stmtEl)
}
}
- if (queryPlanEl == null) return stmt;
+ if (queryPlanEl == null)
+ {
+ // Statements with no QueryPlan (e.g., DECLARE/ASSIGN) still get a synthetic
+ // root node so they appear in the statement tab list.
+ var stmtType = stmt.StatementType.Length > 0
+ ? stmt.StatementType.ToUpperInvariant()
+ : "STATEMENT";
+ stmt.RootNode = new PlanNode
+ {
+ NodeId = -1,
+ PhysicalOp = stmtType,
+ LogicalOp = stmtType,
+ IconName = stmtType switch
+ {
+ "ASSIGN" => "assign",
+ "DECLARE" => "declare",
+ _ => "language_construct_catch_all"
+ }
+ };
+ return stmt;
+ }
ParseStmtAttributes(stmt, stmtEl);
ParseQueryPlanElements(stmt, stmtEl, queryPlanEl);
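
The `Element()` to `Elements()` switch only matters when a Batch carries more than one `<Statements>` child, but when it does, the old code silently dropped everything after the first one. A toy LINQ-to-XML illustration (not real showplan XML, and no namespace for brevity):

```csharp
using System;
using System.Linq;
using System.Xml.Linq;

var batch = XElement.Parse(@"
<Batch>
  <Statements><StmtSimple StatementText='DECLARE @x int' /></Statements>
  <Statements><StmtSimple StatementText='SELECT 1' /></Statements>
</Batch>");

// Element() stops at the first <Statements>, so the SELECT never gets parsed.
Console.WriteLine(batch.Element("Statements")!.Elements().Count());                    // 1

// Elements() walks every <Statements> child, so both statements show up.
Console.WriteLine(batch.Elements("Statements").SelectMany(s => s.Elements()).Count()); // 2
```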
diff --git a/Lite/Windows/AddServerDialog.xaml b/Lite/Windows/AddServerDialog.xaml
index 33d9b8e9..0b63af4b 100644
--- a/Lite/Windows/AddServerDialog.xaml
+++ b/Lite/Windows/AddServerDialog.xaml
@@ -96,6 +96,9 @@
+
= 0)
AddedServer.MonthlyCostUsd = editCost;
@@ -375,6 +378,7 @@ private async void SaveButton_Click(object sender, RoutedEventArgs e)
DatabaseName = string.IsNullOrWhiteSpace(DatabaseNameBox.Text) ? null : DatabaseNameBox.Text.Trim(),
UtilityDatabase = string.IsNullOrWhiteSpace(UtilityDatabaseBox.Text) ? null : UtilityDatabaseBox.Text.Trim(),
ReadOnlyIntent = ReadOnlyIntentCheckBox.IsChecked == true,
+ MultiSubnetFailover = MultiSubnetFailoverCheckBox.IsChecked == true,
MonthlyCostUsd = monthlyCost
};
diff --git a/README.md b/README.md
index 9390c014..19402312 100644
--- a/README.md
+++ b/README.md
@@ -99,7 +99,7 @@ Data starts flowing within 1–5 minutes. That's it. No installation on your ser
**Upgrading from zip?** Click **Import Settings** then **Import Data** in the sidebar and point both at your old Lite folder. Settings imports server connections, alert thresholds, SMTP config, and schedules. Data imports historical DuckDB + Parquet archives. **Auto-update users** (installed via Setup.exe) get updates automatically — no manual import needed.
-**Always On AG?** Enable **ReadOnlyIntent** in the connection settings to route Lite's monitoring queries to a readable secondary, keeping the primary clear.
+**Always On AG?** Enable **ReadOnlyIntent** in the connection settings to route Lite's monitoring queries to a readable secondary, keeping the primary clear. Enable **MultiSubnetFailover** when the listener spans multiple subnets so connections recover quickly after a failover.
### Lite Collectors
@@ -191,6 +191,8 @@ PerformanceMonitorInstaller.exe YourServerName sa YourPassword --uninstall
The installer automatically tests the connection, checks the SQL Server version (2016+ required), executes SQL scripts, downloads community dependencies, creates SQL Agent jobs, and runs initial data collection. You can also install directly from the Dashboard's Add Server dialog.
+**Air-gapped environments?** Place pre-downloaded community scripts (`sp_WhoIsActive.sql`, `DarlingData.sql`, `Install-All-Scripts.sql`) in a `community/` directory next to the installer. The installer uses local files when present and falls back to GitHub downloads otherwise.
+
### CLI Installer Options
| Option | Description |
diff --git a/community/README.md b/community/README.md
new file mode 100644
index 00000000..9bc2b264
--- /dev/null
+++ b/community/README.md
@@ -0,0 +1,14 @@
+# Community Scripts (Offline Installation)
+
+Place pre-downloaded community SQL scripts in this directory for offline/air-gapped installations.
+When files are present here, the installer uses them instead of downloading from GitHub.
+
+## Expected files
+
+| File | Source | License |
+|------|--------|---------|
+| `sp_WhoIsActive.sql` | [amachanic/sp_whoisactive](https://github.com/amachanic/sp_whoisactive) | GPLv3 |
+| `DarlingData.sql` | [erikdarlingdata/DarlingData](https://github.com/erikdarlingdata/DarlingData/tree/main/Install-All) | MIT |
+| `Install-All-Scripts.sql` | [BrentOzarULTD/SQL-Server-First-Responder-Kit](https://github.com/BrentOzarULTD/SQL-Server-First-Responder-Kit) | MIT |
+
+Any file not found here will be downloaded from GitHub as usual.
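
The offline lookup described above amounts to "check `community/` first, then download". A hedged sketch of that pattern; the paths, method name, and URL are placeholders, not the installer's actual code:

```csharp
using System;
using System.IO;
using System.Net.Http;
using System.Threading.Tasks;

static async Task<string> GetCommunityScriptAsync(string fileName, HttpClient http)
{
    // Prefer a pre-downloaded copy sitting next to the installer.
    var localPath = Path.Combine(AppContext.BaseDirectory, "community", fileName);
    if (File.Exists(localPath))
        return await File.ReadAllTextAsync(localPath);

    // Otherwise fall back to downloading it (placeholder URL, not the real source).
    var url = $"https://raw.githubusercontent.com/example/example/main/{fileName}";
    return await http.GetStringAsync(url);
}

using var http = new HttpClient();
var script = await GetCommunityScriptAsync("sp_WhoIsActive.sql", http);
Console.WriteLine($"Loaded {script.Length:N0} characters.");
```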
diff --git a/install/26_blocking_deadlock_analyzer.sql b/install/26_blocking_deadlock_analyzer.sql
index d8dfe243..3db34051 100644
--- a/install/26_blocking_deadlock_analyzer.sql
+++ b/install/26_blocking_deadlock_analyzer.sql
@@ -178,6 +178,8 @@ BEGIN
Aggregate deadlock data by database
Update rows if database already exists from blocking aggregation
Otherwise insert new rows
+ Include databases from previous collection with zero counts so
+ deltas reset to 0 when no new events occur (#803)
*/
WITH
deadlock_aggregates AS
@@ -192,9 +194,41 @@ BEGIN
AND bl.collection_time < @start_time
GROUP BY
bl.database_name
+ ),
+ combined_source AS
+ (
+ SELECT
+ da.database_name,
+ da.deadlock_count,
+ da.total_deadlock_wait_time_ms,
+ da.victim_count
+ FROM deadlock_aggregates AS da
+
+ UNION ALL
+
+ /*
+ Carry forward databases from previous collection with zero
+ counts so delta calculation can reset them to 0
+ */
+ SELECT DISTINCT
+ bds.database_name,
+ 0,
+ 0,
+ 0
+ FROM collect.blocking_deadlock_stats AS bds
+ WHERE bds.collection_time >= @last_deadlock_collection
+ AND bds.collection_time < @start_time
+ AND bds.database_name <> N'(none)'
+ AND NOT EXISTS
+ (
+ SELECT
+ 1/0
+ FROM deadlock_aggregates AS da
+ WHERE da.database_name = bds.database_name
+ )
)
MERGE collect.blocking_deadlock_stats WITH (SERIALIZABLE) AS target
- USING deadlock_aggregates AS source
+ USING combined_source AS source
ON target.database_name = source.database_name
AND target.collection_time >= @start_time
WHEN MATCHED
diff --git a/upgrades/2.4.0-to-2.5.0/01_widen_version_columns.sql b/upgrades/2.4.0-to-2.5.0/01_widen_version_columns.sql
index dcb1afb7..e46c1107 100644
--- a/upgrades/2.4.0-to-2.5.0/01_widen_version_columns.sql
+++ b/upgrades/2.4.0-to-2.5.0/01_widen_version_columns.sql
@@ -3,6 +3,20 @@ Widen sql_server_version and sql_server_edition columns in config.installation_h
Some @@VERSION strings exceed 255 characters (#712)
*/
+SET ANSI_NULLS ON;
+SET ANSI_PADDING ON;
+SET ANSI_WARNINGS ON;
+SET ARITHABORT ON;
+SET CONCAT_NULL_YIELDS_NULL ON;
+SET QUOTED_IDENTIFIER ON;
+SET NUMERIC_ROUNDABORT OFF;
+SET IMPLICIT_TRANSACTIONS OFF;
+SET STATISTICS TIME, IO OFF;
+GO
+
+USE PerformanceMonitor;
+GO
+
IF EXISTS
(
SELECT
diff --git a/upgrades/README.md b/upgrades/README.md
index 28395114..642281d1 100644
--- a/upgrades/README.md
+++ b/upgrades/README.md
@@ -36,30 +36,49 @@ The installer:
## Upgrade Script Guidelines
-1. **Always check before altering**: Use `IF NOT EXISTS` checks before adding columns/indexes
-2. **Be idempotent**: Scripts should be safe to run multiple times
-3. **Preserve data**: Never DROP tables with data (use ALTER/UPDATE instead)
-4. **Add comments**: Document why each change is being made
-5. **Test upgrade paths**: Test upgrading from each previous version
+1. **Start from `_template.sql`**: Copy the template for every new upgrade script — it has the required SET options and `USE PerformanceMonitor` that the installer depends on
+2. **Always check before altering**: Use `IF NOT EXISTS` / `IF EXISTS` checks before adding or modifying columns/indexes
+3. **Be idempotent**: Scripts should be safe to run multiple times
+4. **Preserve data**: Never DROP tables with data (use ALTER/UPDATE instead)
+5. **Add comments**: Document why each change is being made
+6. **Test upgrade paths**: Test upgrading from each previous version
## Example Upgrade Script
```sql
/*
+Copyright 2026 Darling Data, LLC
+https://www.erikdarling.com/
+
Upgrade from 1.0.0 to 1.1.0
Adds execution context tracking to query_stats
*/
--- Add new column if it doesn't exist
-IF NOT EXISTS (
- SELECT 1/0
+SET ANSI_NULLS ON;
+SET ANSI_PADDING ON;
+SET ANSI_WARNINGS ON;
+SET ARITHABORT ON;
+SET CONCAT_NULL_YIELDS_NULL ON;
+SET QUOTED_IDENTIFIER ON;
+SET NUMERIC_ROUNDABORT OFF;
+SET IMPLICIT_TRANSACTIONS OFF;
+SET STATISTICS TIME, IO OFF;
+GO
+
+USE PerformanceMonitor;
+GO
+
+IF NOT EXISTS
+(
+ SELECT
+ 1/0
FROM sys.columns
- WHERE object_id = OBJECT_ID('collect.query_stats')
- AND name = 'execution_context'
+ WHERE object_id = OBJECT_ID(N'collect.query_stats')
+ AND name = N'execution_context'
)
BEGIN
ALTER TABLE collect.query_stats
- ADD execution_context nvarchar(128) NULL;
+ ADD execution_context nvarchar(128) NULL;
PRINT 'Added execution_context column to collect.query_stats';
END;
diff --git a/upgrades/_template.sql b/upgrades/_template.sql
new file mode 100644
index 00000000..c221c882
--- /dev/null
+++ b/upgrades/_template.sql
@@ -0,0 +1,23 @@
+/*
+Copyright 2026 Darling Data, LLC
+https://www.erikdarling.com/
+
+Upgrade from X.Y.Z to X.Y.Z
+
+*/
+
+SET ANSI_NULLS ON;
+SET ANSI_PADDING ON;
+SET ANSI_WARNINGS ON;
+SET ARITHABORT ON;
+SET CONCAT_NULL_YIELDS_NULL ON;
+SET QUOTED_IDENTIFIER ON;
+SET NUMERIC_ROUNDABORT OFF;
+SET IMPLICIT_TRANSACTIONS OFF;
+SET STATISTICS TIME, IO OFF;
+GO
+
+USE PerformanceMonitor;
+GO
+
+/* upgrade logic here — must be idempotent */