From 6a520fb407629bc65f590a0a3d29c76f86fd6a85 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 14:18:05 -0400 Subject: [PATCH 01/52] Protect unique indexes from sp_IndexCleanup Rule 1 and fix Rule 7 match MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rule 1 (Unused Index) was disabling plain unique nonclustered indexes when usage stats showed zero reads. The existing guards only checked is_primary_key and is_unique_constraint, leaving is_unique=1 indexes from CREATE UNIQUE INDEX unprotected. Added `id.is_unique = 0` to the EXISTS so uniqueness enforcement is never silently removed. Rule 7 (Unique Constraint Replacement) was matching nonclustered indexes with EXTRA key columns against a unique constraint, e.g. treating NC (A, B, C) as an equivalent replacement for UC (A, B). Added the reverse-direction EXCEPT so both key-column sets must be identical for the match to fire — otherwise the "MAKE UNIQUE" promotion would produce a wider unique index that cannot back the same referential integrity shape as the original constraint. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_IndexCleanup/sp_IndexCleanup.sql | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/sp_IndexCleanup/sp_IndexCleanup.sql b/sp_IndexCleanup/sp_IndexCleanup.sql index 363defa4..a705aed4 100644 --- a/sp_IndexCleanup/sp_IndexCleanup.sql +++ b/sp_IndexCleanup/sp_IndexCleanup.sql @@ -3069,6 +3069,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
AND id.user_lookups = 0 AND id.is_primary_key = 0 /* Don't disable primary keys */ AND id.is_unique_constraint = 0 /* Don't disable unique constraints */ + AND id.is_unique = 0 /* Don't disable plain unique indexes — they enforce uniqueness even without a constraint */ AND id.is_eligible_for_dedupe = 1 /* Only eligible indexes */ ) AND #index_analysis.index_id <> 1 /* Don't disable clustered indexes */ @@ -3638,7 +3639,13 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. AND id2.is_unique_constraint = 1 AND NOT EXISTS ( - /* Verify key columns match between index and unique constraint */ + /* Verify key columns match between index and unique constraint. + Both directions of EXCEPT must be empty so the two key-column + sets are identical — otherwise an index with extra key columns + (e.g. NC (A,B,C) vs UC (A,B)) would be treated as equivalent + and the wider index would get promoted as a MAKE UNIQUE + replacement that cannot actually back the same FK references. + */ SELECT id2_inner.column_name FROM #index_details AS id2_inner @@ -3653,6 +3660,22 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. WHERE id1_inner.index_hash = ia1.index_hash AND id1_inner.is_included_column = 0 ) + AND NOT EXISTS + ( + SELECT + id1_inner.column_name + FROM #index_details AS id1_inner + WHERE id1_inner.index_hash = ia1.index_hash + AND id1_inner.is_included_column = 0 + + EXCEPT + + SELECT + id2_inner.column_name + FROM #index_details AS id2_inner + WHERE id2_inner.index_hash = id2.index_hash + AND id2_inner.is_included_column = 0 + ) ) OPTION(RECOMPILE); From 910cbb4dd9d7faec4a69e048971624eb7195040e Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 14:32:27 -0400 Subject: [PATCH 02/52] Protect narrower unique indexes from sp_IndexCleanup Rule 3 supersession Rule 3 (Key Subset) treats a wider index as a superset of a narrower one when the wider's key columns start with the narrower's. 
The previous guard only blocked the case where the wider index was non-unique, but allowed a wider UNIQUE index to supersede a narrower UNIQUE index. This is wrong: a unique index on (A) enforces that A is unique, while a unique index on (A, B) only enforces that the pair (A, B) is unique. Disabling the narrower removes a stronger constraint that the wider cannot replicate. Verified against the cumulative harness: IC_H_R3_both_unique no longer flags UX_R3_un_narrow for DISABLE, and no other scenario regressed. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_IndexCleanup/sp_IndexCleanup.sql | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/sp_IndexCleanup/sp_IndexCleanup.sql b/sp_IndexCleanup/sp_IndexCleanup.sql index a705aed4..11ca5c41 100644 --- a/sp_IndexCleanup/sp_IndexCleanup.sql +++ b/sp_IndexCleanup/sp_IndexCleanup.sql @@ -3217,8 +3217,11 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. AND ia1.index_name <> ia2.index_name AND ia2.key_columns LIKE (REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(ia1.key_columns, '~', '~~'), '[', '~['), ']', '~]'), '_', '~_'), '%', '~%') + N', %') ESCAPE '~' /* ia2 has wider key that starts with ia1's key */ AND ISNULL(ia1.filter_definition, '') = ISNULL(ia2.filter_definition, '') /* Matching filters */ - /* Exception: If narrower index is unique and wider is not, they should not be merged */ - AND NOT (ia1.is_unique = 1 AND ia2.is_unique = 0) + /* Never disable a unique narrower index via supersession. + A unique index on (A) enforces "A is unique" — a wider index on + (A, B) only enforces "(A, B) is unique", which is a weaker guarantee. + This applies whether the wider index is unique or not. 
*/ + AND ia1.is_unique = 0 WHERE ia1.consolidation_rule IS NULL /* Not already processed */ AND ia2.consolidation_rule IS NULL /* Not already processed */ /* Don't disable unique constraints — but allow them as the wider (target) index */ From c9efb7dbba17807e5a86d9265f1d013b0072611a Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 14:40:16 -0400 Subject: [PATCH 03/52] Guard sp_IndexCleanup Rule 7.5 against foreign-key dependencies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rule 7.5 proposes disabling a unique constraint and promoting a matching nonclustered index to take its place. When an inbound foreign key references the constraint, the generated script is broken two ways: 1. ALTER TABLE ... DROP CONSTRAINT on a UC referenced by an FK is blocked by SQL Server — the whole cleanup script fails mid-run. 2. If the user falls back to ALTER INDEX ... DISABLE on the UC's backing index, SQL Server silently disables the FK (is_disabled=1, is_not_trusted=1) and lets orphan rows into the child table. The warning is trivial to miss in multi-statement output. Added a NOT EXISTS against is_foreign_key_reference on the UC's key columns so the DISABLE UPDATE skips those constraints. The downstream MAKE UNIQUE cleanup at line 3745 already reverts MAKE UNIQUE when the matching UC wasn't marked DISABLE, so a single guard covers both sides. Verified against the harness: IC_H_R7_fk (UC with inbound FK) now emits only compression rebuilds. IC_H_R7_match (same shape, no FK) still fires Rule 7 normally. No other scenario regressed. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_IndexCleanup/sp_IndexCleanup.sql | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/sp_IndexCleanup/sp_IndexCleanup.sql b/sp_IndexCleanup/sp_IndexCleanup.sql index 11ca5c41..8b005a59 100644 --- a/sp_IndexCleanup/sp_IndexCleanup.sql +++ b/sp_IndexCleanup/sp_IndexCleanup.sql @@ -3707,6 +3707,21 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ON ia_nc.scope_hash = ia_uc.scope_hash /* Same database and object */ AND ia_nc.index_name <> ia_uc.index_name /* Different index */ AND ia_uc.key_columns = ia_nc.key_columns /* Verify key columns EXACT match */ + WHERE NOT EXISTS + ( + /* Don't propose replacing a unique constraint that backs an inbound + foreign key. Dropping it would be blocked by SQL Server, and + ALTER INDEX ... DISABLE on its backing index silently disables + every FK referencing it (leaving orphan rows possible). The user's + cleanup script would either error mid-execution or break + referential integrity without warning. */ + SELECT + 1/0 + FROM #index_details AS id_fk + WHERE id_fk.index_hash = ia_uc.index_hash + AND id_fk.is_foreign_key_reference = 1 + AND id_fk.is_included_column = 0 + ) OPTION(RECOMPILE); /* Second, mark nonclustered indexes to be made unique */ From e9eaee88f7828751c29be8eaa16ea945e791ca32 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 15:10:51 -0400 Subject: [PATCH 04/52] Protect primary keys from sp_IndexCleanup Rule 2 and Rule 5 DISABLE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rules 2 (Exact Duplicate) and 5 (Key Duplicate, different includes) previously only excluded unique constraints from the loser side. A nonclustered primary key could still end up as the DISABLE candidate if the priority+alphabetical tie-break happened to favor another duplicate. 
Disabling a PK's backing index is particularly dangerous: SQL Server silently cascades the disable to every inbound FK (is_disabled=1, is_not_trusted=1), after which orphan rows can be inserted into the child tables without error. The user's cleanup script would leave referential integrity broken on success, not failure. Extended the loser-side NOT EXISTS to also match is_primary_key = 1. The opposite permutation of each pair (with the PK as the keeper ia2) still runs, so regular nonclustered duplicates of a PK are still cleaned up — the PK is only protected from being picked as the loser. Verified against the harness: IC_H_R2_pk_nc and IC_H_R5_pk_nc still DISABLE the non-PK duplicate, and no scenario regressed except a cosmetic superseded_info column on one PK row changing to N/A. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_IndexCleanup/sp_IndexCleanup.sql | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/sp_IndexCleanup/sp_IndexCleanup.sql b/sp_IndexCleanup/sp_IndexCleanup.sql index 8b005a59..b0988499 100644 --- a/sp_IndexCleanup/sp_IndexCleanup.sql +++ b/sp_IndexCleanup/sp_IndexCleanup.sql @@ -3115,14 +3115,20 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. AND ia1.exact_match_hash = ia2.exact_match_hash /* Exact match: keys + includes + filter */ WHERE ia1.consolidation_rule IS NULL /* Not already processed */ AND ia2.consolidation_rule IS NULL /* Not already processed */ - /* Exclude unique constraints - we'll handle those separately in Rule 7 */ + /* Exclude unique constraints and primary keys on the loser (ia1) side. + Rule 2 is only allowed to DISABLE a regular nonclustered index — never + a PK or UC, both of which back FK referential integrity and cannot be + safely disabled. UCs are still processed as Rule 7.5 targets; PKs are + off-limits entirely for disabling. 
The opposite permutation of this + pair (with the PK as ia2, the keeper) still runs and correctly + disables the non-PK duplicate. */ AND NOT EXISTS ( SELECT 1/0 - FROM #index_details AS id1_uc - WHERE id1_uc.index_hash = ia1.index_hash - AND id1_uc.is_unique_constraint = 1 + FROM #index_details AS id1_pk + WHERE id1_pk.index_hash = ia1.index_hash + AND (id1_pk.is_unique_constraint = 1 OR id1_pk.is_primary_key = 1) ) AND NOT EXISTS ( @@ -3397,14 +3403,17 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. AND ISNULL(ia1.included_columns, '') <> ISNULL(ia2.included_columns, '') /* Different includes */ WHERE ia1.consolidation_rule IS NULL /* Not already processed */ AND ia2.consolidation_rule IS NULL /* Not already processed */ - /* Exclude pairs where either one is a unique constraint (we'll handle those separately in Rule 7) */ + /* Exclude unique constraints and primary keys on the loser (ia1) side. + Same reasoning as Rule 2: Rule 5 may only DISABLE a regular NC, not a + PK or UC whose index backs FK referential integrity. UCs are still + processed via Rule 7.5; PKs must never be disabled. */ AND NOT EXISTS ( SELECT 1/0 - FROM #index_details AS id1_uc - WHERE id1_uc.index_hash = ia1.index_hash - AND id1_uc.is_unique_constraint = 1 + FROM #index_details AS id1_pk + WHERE id1_pk.index_hash = ia1.index_hash + AND (id1_pk.is_unique_constraint = 1 OR id1_pk.is_primary_key = 1) ) AND NOT EXISTS ( From 096db4ef1ef7a1f9ed5016b4f91fec9207c0264e Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 15:37:39 -0400 Subject: [PATCH 05/52] Reject READ_ONLY and ERROR Query Store states in sp_QueryStoreCleanup The state check only rejected actual_state = 0 (OFF) and NULL. Query Store also has READ_ONLY (actual_state = 1) and ERROR (actual_state = 3) states, in which sp_query_store_remove_query cannot succeed. 
On a READ_ONLY database the cleanup cursor would have called the removal proc once per target and failed on every call, producing noisy error output with no useful result. READ_ONLY is commonly triggered automatically when Query Store hits MAX_STORAGE_SIZE_MB, so this is not a rare edge case. Added explicit early-exit branches for actual_state = 1 and 3 with messages pointing the operator at the likely cause (storage limit, readonly_reason). Verified on SQL Server 2022: READ_WRITE (state = 2): runs as before. READ_ONLY (state = 1): new message, clean return. OFF (state = 0): existing message, clean return. Note: Microsoft's documentation lists the Query Store states in an order that does not match their actual_state integer values on recent builds. Values confirmed empirically against sys.database_query_store_options.actual_state + actual_state_desc. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql b/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql index d1eafbd2..6e3d1384 100644 --- a/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql +++ b/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql @@ -313,6 +313,25 @@ OPTION(RECOMPILE);'; RETURN; END; + /* + Reject any state where Query Store cannot accept writes. sp_query_store_remove_query + modifies Query Store data; calling it against a READ_ONLY (1) or ERROR (3) database + fails once per target in the cursor, producing noisy error output and leaving the + caller with no useful result. Catch it up front instead. Only actual_state = 2 + (READ_WRITE) is safe for cleanup. + */ + IF @actual_state = 1 + BEGIN + RAISERROR('Query Store is in READ_ONLY state for database %s. Writes are blocked, so cleanup cannot run.
This is typically caused by hitting MAX_STORAGE_SIZE_MB or by an explicit READ_ONLY operation_mode.', 16, 1, @database_name) WITH NOWAIT; + RETURN; + END; + + IF @actual_state = 3 + BEGIN + RAISERROR('Query Store is in ERROR state for database %s. Cleanup cannot run until Query Store is recovered (see sys.database_query_store_options.readonly_reason).', 16, 1, @database_name) WITH NOWAIT; + RETURN; + END; + /* Parse @cleanup_targets */ From 427fa9f1132e1cca0abf25201b292a1029126d19 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 15:42:06 -0400 Subject: [PATCH 06/52] Use MIN/MAX/AVG for sp_QuickieStore wait-time column aggregations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The plan-level wait-stats aggregation in the #query_store_wait_stats insert used SUM() for every value including the ones typed and named as min_query_wait_time_ms and max_query_wait_time_ms. Summing per-interval minima and maxima inflates the output by the number of captured intervals and produces numbers the column names promise they won't. Changed the min_/max_ columns to MIN()/MAX(). Also changed avg_query_wait_time_ms from SUM() to AVG(); summed averages are the same kind of unit error. The outer GROUP BY is per (plan_id, wait_category_desc), so MIN/MAX/AVG land on the right scope. Left the HAVING SUM(min_query_wait_time_ms) > 0 as-is — as a gate for "any wait time recorded in any interval" it's correct and changing it would alter the row-filter semantics. total_query_wait_time_ms still uses SUM — that one matches its name. Verified the sproc installs and returns correctly-shaped wait output against PerformanceMonitor on SQL Server 2022. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_QuickieStore/sp_QuickieStore.sql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sp_QuickieStore/sp_QuickieStore.sql b/sp_QuickieStore/sp_QuickieStore.sql index c25e5b85..b25eb0cd 100644 --- a/sp_QuickieStore/sp_QuickieStore.sql +++ b/sp_QuickieStore/sp_QuickieStore.sql @@ -10832,13 +10832,13 @@ SELECT total_query_wait_time_ms = SUM(qsws_with_lasts.total_query_wait_time_ms), avg_query_wait_time_ms = - SUM(qsws_with_lasts.avg_query_wait_time_ms), + AVG(qsws_with_lasts.avg_query_wait_time_ms), last_query_wait_time_ms = MAX(qsws_with_lasts.partitioned_last_query_wait_time_ms), min_query_wait_time_ms = - SUM(qsws_with_lasts.min_query_wait_time_ms), + MIN(qsws_with_lasts.min_query_wait_time_ms), max_query_wait_time_ms = - SUM(qsws_with_lasts.max_query_wait_time_ms) + MAX(qsws_with_lasts.max_query_wait_time_ms) FROM ( SELECT From 344fff5bc426550a06fc88bbcc960f2d29117c58 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 15:48:12 -0400 Subject: [PATCH 07/52] Fix sp_PerfCheck deadlock check for sub-day uptime MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The deadlock rate check (check_id 5103) divided the cumulative deadlock count by DATEDIFF(DAY, sqlserver_start_time, SYSDATETIME()) wrapped in NULLIF(..., 0). For any server whose uptime had not yet crossed a calendar-day boundary, DATEDIFF(DAY, ...) returned 0, NULLIF collapsed the divisor to NULL, and the resulting `rate > 9` comparison evaluated as UNKNOWN in the WHERE clause — which filters out the row, skipping the check entirely. The details CASE already had an ELSE branch formatted for "X deadlocks in Y hours since startup", but that branch was unreachable because WHERE never let a low-uptime row through. Rewrote the priority CASE and WHERE rate calculation to use DATEDIFF(SECOND, ...) 
with a *86400.0 scale factor, preserving the "more than 9 per day" threshold semantics while working for any uptime >= 1 second. DATEDIFF(SECOND) has ~68 years of safe range so overflow is not a concern. Verified on SQL Server 2022 with 3 days uptime and 725 deadlocks/day: the check now fires with priority 20 (High). Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_PerfCheck/sp_PerfCheck.sql | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/sp_PerfCheck/sp_PerfCheck.sql b/sp_PerfCheck/sp_PerfCheck.sql index 5ee14c6d..e0ab2814 100644 --- a/sp_PerfCheck/sp_PerfCheck.sql +++ b/sp_PerfCheck/sp_PerfCheck.sql @@ -1007,17 +1007,25 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ) SELECT check_id = 5103, + /* + Rate is deadlocks-per-day, computed from DATEDIFF(SECOND, ...) rather than + DATEDIFF(DAY, ...). The DAY-based version rounded sub-day uptime to 0 and + the NULLIF then collapsed the whole expression to NULL, which evaluated as + UNKNOWN in the WHERE below and silently skipped the deadlock check for the + first calendar-day-boundary of server uptime. SECOND-based rate keeps the + threshold semantics identical for any uptime ≥ 1 second. + */ priority = CASE WHEN ( - 1.0 * - p.cntr_value / + p.cntr_value * + 86400.0 / NULLIF ( DATEDIFF ( - DAY, + SECOND, osi.sqlserver_start_time, SYSDATETIME() ), @@ -1027,13 +1035,13 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. THEN 20 /* High: >100 deadlocks/day */ WHEN ( - 1.0 * - p.cntr_value / + p.cntr_value * + 86400.0 / NULLIF ( DATEDIFF ( - DAY, + SECOND, osi.sqlserver_start_time, SYSDATETIME() ), @@ -1074,13 +1082,13 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
AND p.cntr_value > 0 AND ( - 1.0 * - p.cntr_value / + p.cntr_value * + 86400.0 / NULLIF ( DATEDIFF ( - DAY, + SECOND, osi.sqlserver_start_time, SYSDATETIME() ), From 4202a17390ede0aca6aaa177b5e2982b7d1201b5 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 15:52:58 -0400 Subject: [PATCH 08/52] Fix sp_HumanEventsBlockViewer last_transaction_started attribute mismatch The column last_transaction_started was read from the blocked-process XML at two sites in the blocked_process_report parsing path, but with different XE attributes: #blocked insert: bd.value('(process/@lasttranstarted)[1]', ...) #blocking insert: bg.value('(process/@lastbatchstarted)[1]', ...) The two result sets are UNION ALL'd into #blocks, so downstream the same column contains transaction-started timestamps on blocked rows and batch-started timestamps on blocking rows. Any consumer comparing the two (e.g. "which side's transaction started earlier?") would get nonsensical answers. The sp_server_diagnostics path above (both blocked and blocking sites) uses @lastbatchstarted, as does the blocking-side BPR read that UNIONs with this row. Aligned the blocked-side BPR read to @lastbatchstarted so all four read sites match. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEventsBlockViewer.sql | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql index 58279174..ce2d51fe 100644 --- a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql +++ b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql @@ -1928,7 +1928,11 @@ SELECT query_text_pre = bd.value('(process/inputbuf/text())[1]', 'nvarchar(max)'), wait_time = bd.value('(process/@waittime)[1]', 'bigint'), transaction_name = bd.value('(process/@transactionname)[1]', 'nvarchar(1024)'), - last_transaction_started = bd.value('(process/@lasttranstarted)[1]', 'datetime2'), + /* lastbatchstarted matches the other three read sites for this column + (including the blocking-side read 120 lines below that UNION ALLs with + this row). Previously this site read @lasttranstarted, leaving the same + column holding two different attributes on blocked vs blocking rows. */ + last_transaction_started = bd.value('(process/@lastbatchstarted)[1]', 'datetime2'), last_transaction_completed = bd.value('(process/@lastbatchcompleted)[1]', 'datetime2'), wait_resource = bd.value('(process/@waitresource)[1]', 'nvarchar(1024)'), lock_mode = bd.value('(process/@lockMode)[1]', 'nvarchar(10)'), From 0a453c373572a09ffb456879d894cd8bdb68a865 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 16:02:44 -0400 Subject: [PATCH 09/52] Coerce sp_HumanEvents @seconds_sample = 0/NULL to 1 second @seconds_sample drives the WAITFOR DELAY that paces the XE session. The duration-math block only populates @waitfor when @seconds_sample > 0, so passing 0 or NULL left @waitfor at its default of N'' and the unconditional WAITFOR DELAY @waitfor later crashed with a syntax error. 
Tried a RAISERROR/THROW validation first, but the proc body is wrapped in a TRY/CATCH whose CATCH block tries to ALTER EVENT SESSION ... STOP on a session that was never created when the failure happens before session creation. That overrides the original message with a "session does not exist" error and makes the true problem unguessable. Coerce to 1 second instead: NULL silently, 0 with a warning pointing the caller at a more useful value. The proc then completes its normal create / start / 1-second wait / stop / drop flow and returns empty result sets. Callers still get a running proc and a discoverable warning instead of a cryptic crash. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEvents.sql | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/sp_HumanEvents/sp_HumanEvents.sql b/sp_HumanEvents/sp_HumanEvents.sql index c67566d6..6f1b888d 100644 --- a/sp_HumanEvents/sp_HumanEvents.sql +++ b/sp_HumanEvents/sp_HumanEvents.sql @@ -890,6 +890,25 @@ BEGIN RETURN; END; +/* +@seconds_sample drives the WAITFOR DELAY that controls how long the XE session +samples before we shred results. When NULL it's treated as unset. When +explicitly 0 the user is asking for no sampling, which would hit the +unconditional WAITFOR below with an empty @waitfor string and raise a +syntax error. Coerce NULL and 0 to 1 second — the minimum meaningful +value — and warn if 0 was explicit. +*/ +IF @debug = 1 BEGIN RAISERROR(N'Checking seconds_sample parameter', 0, 1) WITH NOWAIT; END; +IF @seconds_sample IS NULL +BEGIN + SET @seconds_sample = 1; +END; +ELSE IF @seconds_sample = 0 +BEGIN + RAISERROR(N'@seconds_sample = 0 is not meaningful (nothing would be sampled). Using 1 second instead. 
Pass a larger value for real sampling.', 0, 1) WITH NOWAIT; + SET @seconds_sample = 1; +END; + IF @debug = 1 BEGIN RAISERROR(N'Checking query sort order', 0, 1) WITH NOWAIT; END; IF @query_sort_order NOT IN From 0c5f43de190a6427a96a50ae0aad11de51c65fca Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 16:24:15 -0400 Subject: [PATCH 10/52] Preserve decimal precision in sp_PressureDetector size/memory GB math MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Several size/memory columns that carry a _gb suffix were computed with bigint-only arithmetic, forcing integer division to truncate sub-GB values to 0. On Azure Basic/S0 tiers or small tempdb files this reported 0 GB where the real value was 0.5 GB, hiding the very condition the check is meant to surface. Sites fixed: - tempdb min/max size_gb and growth_increment_gb (line ~2287) - database_size_out_gb Azure + on-prem paths (line ~2586/2603) - physical_memory_available_gb and virtual_memory_available_gb from RING_BUFFER_RESOURCE_MONITOR shreds (line ~2657) - max_server_memory_gb from sys.configurations (line ~2942) For each, changed the divisor literals from 1024 / 1024 to 1024.0 / 1024.0 so the expression promotes to decimal, and wrapped the output in CONVERT(decimal(19, 2), ...) to keep a sensible display precision. Separately, tempdb growth columns now filter out is_percent_growth = 1 rows via CASE WHEN — percent-growth files store the percentage in mf.growth, not a page count, so the old * 8 math produced meaningless GB numbers for them. Percent-growth in tempdb is a legacy misconfiguration anyway, and surfacing a wrong GB is worse than surfacing NULL. max_server_memory_gb also needed an explicit CONVERT(bigint, ...) around c.value_in_use because sys.configurations.value_in_use is sql_variant and won't implicitly convert to decimal. 
Verified on SQL Server 2022: tempdb min/max_size_gb: 1.00 (was 1) max_server_memory_gb: 90.00 (was 90) total_database_size_gb: 244.65 (was 244) Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_PressureDetector/sp_PressureDetector.sql | 53 +++++++++++++-------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/sp_PressureDetector/sp_PressureDetector.sql b/sp_PressureDetector/sp_PressureDetector.sql index 8423ea5b..143ff613 100644 --- a/sp_PressureDetector/sp_PressureDetector.sql +++ b/sp_PressureDetector/sp_PressureDetector.sql @@ -2284,13 +2284,16 @@ OPTION(MAXDOP 1, RECOMPILE);', total_data_files = COUNT_BIG(*), min_size_gb = - MIN(mf.size * 8) / 1024 / 1024, + CONVERT(decimal(19, 2), MIN(mf.size * 8.0) / 1024.0 / 1024.0), max_size_gb = - MAX(mf.size * 8) / 1024 / 1024, + CONVERT(decimal(19, 2), MAX(mf.size * 8.0) / 1024.0 / 1024.0), + /* Exclude percent-growth files: their mf.growth is a percentage, + not page count, so * 8 math produces meaningless GB numbers. + Percent-growth files are legacy/misconfigured in tempdb anyway. 
*/ min_growth_increment_gb = - MIN(mf.growth * 8) / 1024 / 1024, + CONVERT(decimal(19, 2), MIN(CASE WHEN mf.is_percent_growth = 0 THEN mf.growth * 8.0 END) / 1024.0 / 1024.0), max_growth_increment_gb = - MAX(mf.growth * 8) / 1024 / 1024, + CONVERT(decimal(19, 2), MAX(CASE WHEN mf.is_percent_growth = 0 THEN mf.growth * 8.0 END) / 1024.0 / 1024.0), scheduler_total_count = ( SELECT @@ -2576,14 +2579,18 @@ OPTION(MAXDOP 1, RECOMPILE);', @database_size_out = N' SELECT @database_size_out_gb = - SUM + CONVERT ( - CONVERT + decimal(19, 2), + SUM ( - bigint, - df.size - ) - ) * 8 / 1024 / 1024 + CONVERT + ( + bigint, + df.size + ) + ) * 8.0 / 1024.0 / 1024.0 + ) FROM sys.database_files AS df OPTION(MAXDOP 1, RECOMPILE);'; END; @@ -2593,14 +2600,18 @@ OPTION(MAXDOP 1, RECOMPILE);', @database_size_out = N' SELECT @database_size_out_gb = - SUM + CONVERT ( - CONVERT + decimal(19, 2), + SUM ( - bigint, - mf.size - ) - ) * 8 / 1024 / 1024 + CONVERT + ( + bigint, + mf.size + ) + ) * 8.0 / 1024.0 / 1024.0 + ) FROM sys.master_files AS mf WHERE mf.database_id > 4 OPTION(MAXDOP 1, RECOMPILE);'; @@ -2654,9 +2665,9 @@ OPTION(MAXDOP 1, RECOMPILE);', indicators_system = t.record.value('(/Record/ResourceMonitor/IndicatorsSystem)[1]', 'integer'), physical_memory_available_gb = - t.record.value('(/Record/MemoryRecord/AvailablePhysicalMemory)[1]', 'bigint') / 1024 / 1024, + CONVERT(decimal(19, 2), t.record.value('(/Record/MemoryRecord/AvailablePhysicalMemory)[1]', 'bigint') / 1024.0 / 1024.0), virtual_memory_available_gb = - t.record.value('(/Record/MemoryRecord/AvailableVirtualAddressSpace)[1]', 'bigint') / 1024 / 1024 + CONVERT(decimal(19, 2), t.record.value('(/Record/MemoryRecord/AvailableVirtualAddressSpace)[1]', 'bigint') / 1024.0 / 1024.0) FROM sys.dm_os_sys_info AS osi CROSS JOIN ( @@ -2944,12 +2955,12 @@ OPTION(MAXDOP 1, RECOMPILE);', SELECT CONVERT ( - bigint, - c.value_in_use + decimal(19, 2), + CONVERT(bigint, c.value_in_use) / 1024.0 ) FROM sys.configurations AS c WHERE c.name = 
N''max server memory (MB)'' - ) / 1024, + ), max_memory_grant_cap = @memory_grant_cap, memory_model = From e7689433176e9c80706fcc6eff7728a7e06a1d0a Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 16:29:43 -0400 Subject: [PATCH 11/52] Escape double quotes in sp_LogHunter @custom_message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The #search.command computed column builds an xp_readerrorlog call with the search string wrapped in double quotes: EXECUTE master.dbo.xp_readerrorlog [log], 1, "", " ", ... @custom_message is concatenated straight into the "" slot without escaping, so a literal " in the user-supplied message closes the first argument early. The rest of the @custom_message gets parsed as T-SQL and sp_executesql raises "Incorrect syntax near '+'" or a similar message. Verified by calling with @custom_message = N'hello"+injection'. Doubled the quote on the way in: REPLACE(@custom_message, N'"', N'""'). xp_readerrorlog accepts the doubled quote as a literal inside the search string. Normal strings (no quotes) behave the same as before. Verified against the QsCleanupTest disk-full message lookup with @custom_message = N'Microsoft'. Note: this is not a privilege-escalation vector — xp_readerrorlog already requires securityadmin-level access — but cryptic syntax errors from a valid-looking input are a real operator footgun. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_LogHunter/sp_LogHunter.sql | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sp_LogHunter/sp_LogHunter.sql b/sp_LogHunter/sp_LogHunter.sql index 420374b3..93af4b18 100644 --- a/sp_LogHunter/sp_LogHunter.sql +++ b/sp_LogHunter/sp_LogHunter.sql @@ -491,7 +491,13 @@ BEGIN ( VALUES ( - N'"' + @custom_message + '"', + /* xp_readerrorlog search strings are wrapped in double quotes + (see the #search.command computed column), so any literal " + inside the user-supplied @custom_message must be doubled to + avoid closing the argument early and producing an + "Incorrect syntax near '+'" error when sp_executesql parses + the generated batch. */ + N'"' + REPLACE(@custom_message, N'"', N'""') + N'"', N'"' + CONVERT(nvarchar(10), DATEADD(DAY, @days_back, SYSDATETIME()), 112) + N'"', N'"' + CONVERT(nvarchar(30), @start_date) + N'"', N'"' + CONVERT(nvarchar(30), @end_date) + N'"' From 300ec242c2106e9fa018af414655fbe19f02cb06 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 17:25:29 -0400 Subject: [PATCH 12/52] Cap sp_IndexCleanup subset-chain resolution at 100 iterations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The WHILE loop that flattens Rule 3 subset chains (A → B → C collapsed to A → C) has no iteration limit. Under the current Rule 3 logic a cycle can't form — supersession points strictly toward wider indexes, which is a partial order — but a future bug introduced elsewhere that set target_index_name to a cycling value would hang the sproc until tempdb filled. Added an iteration counter and a cap of 100, plus a severity-16 RAISERROR if the cap is ever reached so the failure mode is loud and actionable rather than silent. Chain depth of 100 would require 100 distinct indexes on a single table in a prefix-supersession chain — not realistic in practice. 
Verified the sproc still runs clean and Rule 3 still fires correctly against IC_H_R3_plain (narrower IX_R3_narrow disabled, wider IX_R3_wide kept). No cap-hit warning under current behavior. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_IndexCleanup/sp_IndexCleanup.sql | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/sp_IndexCleanup/sp_IndexCleanup.sql b/sp_IndexCleanup/sp_IndexCleanup.sql index 363defa4..87eb72ea 100644 --- a/sp_IndexCleanup/sp_IndexCleanup.sql +++ b/sp_IndexCleanup/sp_IndexCleanup.sql @@ -3274,9 +3274,20 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* Resolve subset chains: if A -> B -> C, flatten so A -> C directly. Without this, includes from transitive subsets are lost because Rule 6 only collects includes from direct subsets of the final superset. */ + /* + Flatten subset chains (A → B → C, so A → C) one hop at a time. Rule 3 + only points at strictly wider indexes, which is a partial order, so a + cycle cannot form under current logic. Cap iterations anyway so a + future bug that introduces a cycle surfaces as a warning rather than + an infinite loop that eventually fills tempdb. A chain depth of 100 + would require 100 distinct indexes on a single table in a prefix + supersession chain — not realistic in practice. + */ DECLARE @chains_resolved bigint = 1; + DECLARE @chain_iterations integer = 0; WHILE @chains_resolved > 0 + AND @chain_iterations < 100 BEGIN UPDATE ia1 @@ -3294,6 +3305,12 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. AND ia1.target_index_name <> ia2.target_index_name; SET @chains_resolved = ROWCOUNT_BIG(); + SET @chain_iterations += 1; + END; + + IF @chain_iterations >= 100 + BEGIN + RAISERROR('sp_IndexCleanup chain resolution hit the 100-iteration cap. This should never happen under the current rules; a cycle in target_index_name likely indicates a bug introduced by a later rule change. 
The recommendations below may be inconsistent — please investigate before running them.', 16, 1) WITH NOWAIT; END; IF @debug = 1 From c01543b32782ad9be9310b92c0ae583189593a0e Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 17:30:04 -0400 Subject: [PATCH 13/52] Guard sp_HumanEventsBlockViewer recursive blocking-tree against cycles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The blocking-hierarchy CTE (lines ~2145-2203) used OPTION(MAXRECURSION 0), removing the default safety ceiling entirely. Two sessions can legitimately appear to block each other in the same monitor_loop window — before the deadlock monitor resolves — and the recursive join bg.blocking_desc = h.blocked_desc has no cycle check. Once an anchor row feeds into a cycle in the blocked tree, the CTE has no exit and the UPDATE runs until tempdb fills. Added a guard to the recursive step: WHERE h.sort_order NOT LIKE '%' + bg.blocked_desc + '%' sort_order already accumulates every (SPID:ECID) visited on this branch, so a LIKE check against the candidate blocked_desc breaks the recursion before a revisit. Reverted MAXRECURSION 0 to MAXRECURSION 100 as a backstop in case an unexpected blocked_desc format slips past the LIKE guard — hitting 100 raises a catchable error instead of running unbounded. Verified in isolation: start→a, a→b, b→a (chain into cycle): Without guard, MAXRECURSION 100: "maximum recursion ... 
exhausted" With guard: two rows returned (start→a at level 0, a→b at level 1), the b→a edge that would close the cycle is correctly skipped Normal chain 1→2→3→4: three rows, all levels populated (regression check) Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEventsBlockViewer.sql | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql index 58279174..03717dec 100644 --- a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql +++ b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql @@ -2189,6 +2189,15 @@ WITH JOIN #blocking AS bg ON bg.monitor_loop = h.monitor_loop AND bg.blocking_desc = h.blocked_desc + /* + Cycle guard: skip a row whose blocked_desc already appears in the + accumulated sort_order. Two sessions can briefly appear to block each + other in the same monitor_loop (before the deadlock monitor fires), + and without a guard the recursion has no exit. The sort_order string + contains every (SPID:ECID) we've visited on this branch; checking for + the candidate blocked_desc before we follow it prevents the cycle. + */ + WHERE h.sort_order NOT LIKE '%' + bg.blocked_desc + '%' ) UPDATE #blocked @@ -2200,7 +2209,13 @@ JOIN hierarchy AS h ON h.monitor_loop = b.monitor_loop AND h.blocking_desc = b.blocking_desc AND h.blocked_desc = b.blocked_desc -OPTION(RECOMPILE, MAXRECURSION 0); +/* +MAXRECURSION 100 (the default) is plenty for real blocking chains and +still acts as a backstop if the cycle guard above is ever bypassed by +a blocked_desc that doesn't format the same way as expected. Reverted +from MAXRECURSION 0 which gave the runaway case no ceiling at all. 
+*/ +OPTION(RECOMPILE, MAXRECURSION 100); IF @debug = 1 BEGIN From f2d3b2aa43779b72bcb8f2582ce2f47a45eceac9 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 17:32:05 -0400 Subject: [PATCH 14/52] Gate sp_PerfCheck LPIM recommendation off Azure MI and AWS RDS The LPIM check (check_id 4105) only excluded @azure_sql_db from its firing condition. Azure Managed Instance and AWS RDS both run SQL Server on managed platforms that don't expose the LockPagesInMemory user right, so telling the operator to enable LPIM on those edition/platforms is unactionable noise. The IFI check immediately below this one (line ~1177) already excludes all three, so LPIM is just catching up. Added AND @azure_managed_instance = 0 AND @aws_rds = 0 to the outer IF gate. On-prem check still fires under the same conditions as before (memory model = CONVENTIONAL, physical RAM >= 32 GB). Verified the sproc installs clean and the LPIM finding does not appear on the test SQL 2022 instance (which has memory model = LOCK_PAGES, so the check correctly suppresses). Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_PerfCheck/sp_PerfCheck.sql | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sp_PerfCheck/sp_PerfCheck.sql b/sp_PerfCheck/sp_PerfCheck.sql index 5ee14c6d..475c022a 100644 --- a/sp_PerfCheck/sp_PerfCheck.sql +++ b/sp_PerfCheck/sp_PerfCheck.sql @@ -1134,8 +1134,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. FROM sys.dm_os_sys_info AS osi; END; - /* Check if Lock Pages in Memory is enabled (on-prem and managed instances only) */ + /* Check if Lock Pages in Memory is enabled. + Only on-prem can change LPIM. Azure Managed Instance and AWS RDS + both run SQL Server on platforms that don't expose the + LockPagesInMemory user right, so flagging them is unactionable + noise. Matches the IFI check gate below for consistency. 
*/ IF @azure_sql_db = 0 + AND @azure_managed_instance = 0 + AND @aws_rds = 0 AND @has_view_server_state = 1 BEGIN INSERT INTO From ff3bc325cfec8a195ec020f34c687fdc8227ad57 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 17:44:53 -0400 Subject: [PATCH 15/52] Remove sp_QuickieStore wait-stats per-interval TOP (5) pre-filter The CROSS APPLY that joined to sys.query_store_wait_stats had: SELECT TOP (5) qsws.* FROM sys.query_store_wait_stats AS qsws WHERE qsws.runtime_stats_interval_id = qsrs.runtime_stats_interval_id AND qsws.plan_id = qsrs.plan_id ... ORDER BY qsws.avg_query_wait_time_ms DESC so for each (interval, plan) it kept only the 5 wait categories with the highest per-interval avg wait time and discarded the rest. The outer query then GROUP BYs (plan_id, wait_category_desc) and sums across intervals. The net effect: a wait category ranked 6+ in one interval but present in another silently loses the first interval's contribution, making the same plan's totals inconsistent run-to-run depending on which intervals it executed in. Removed the TOP (and the now-unused ORDER BY) so every captured wait category contributes to the outer aggregation. QS caps wait categories to a small set per (interval, plan), so removing the TOP does not blow up row counts. Verified sp_QuickieStore installs clean and runs without error against PerformanceMonitor on SQL Server 2022 with @wait_filter. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_QuickieStore/sp_QuickieStore.sql | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/sp_QuickieStore/sp_QuickieStore.sql b/sp_QuickieStore/sp_QuickieStore.sql index b25eb0cd..cf553674 100644 --- a/sp_QuickieStore/sp_QuickieStore.sql +++ b/sp_QuickieStore/sp_QuickieStore.sql @@ -10863,15 +10863,24 @@ FROM FROM #query_store_runtime_stats AS qsrs CROSS APPLY ( - SELECT TOP (5) + /* + Pull every wait category captured for this (interval, plan). 
+ The previous TOP (5) ORDER BY avg_query_wait_time_ms DESC here + dropped wait categories ranked 6+ per interval before the outer + GROUP BY ran, so a category that was (say) 6th worst in one + interval but 2nd worst in another would silently have the first + interval''s contribution missing from its totals. The outer + aggregation groups by (plan_id, wait_category_desc) and the + number of wait categories per interval is capped by QS at a + small set, so removing the TOP does not explode row counts. + */ + SELECT qsws.* FROM ' + @database_name_quoted + N'.sys.query_store_wait_stats AS qsws WHERE qsws.runtime_stats_interval_id = qsrs.runtime_stats_interval_id AND qsws.plan_id = qsrs.plan_id AND qsws.wait_category > 0 AND qsws.min_query_wait_time_ms > 0 - ORDER BY - qsws.avg_query_wait_time_ms DESC ) AS qsws WHERE qsrs.database_id = @database_id ) AS qsws_with_lasts From 4ffacfa0753fcaf91f61b29e3b58f3ba24a30876 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 17:48:06 -0400 Subject: [PATCH 16/52] Anchor sp_HumanEvents cleanup LIKE to its own session names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The @cleanup = 1 path built a DROP EVENT SESSION list for every row matching N'%HumanEvents_%'. Two problems: 1. Leading % — unanchored, so N'MyHumanEvents_AppSession' (a user's own XE session whose name happens to include "HumanEvents") matched and got dropped. 2. Unescaped _ — LIKE treats _ as a single-character wildcard, so N'HumanEventsMonitor' (no literal underscore at all) matched via the trailing % plus the _ wildcard absorbing any one character. 
Changed to two anchored patterns with the underscore escaped via a bracket class, matching exactly what sp_HumanEvents creates: LIKE N'HumanEvents[_]%' OR LIKE N'keeper[_]HumanEvents[_]%' Verified against a synthetic session-name set: OLD matched 4 of 5 names (3 legitimate + HumanEventsMonitor + MyHumanEvents_AppSession — 2 collateral-damage matches) NEW matched exactly the 2 sp_HumanEvents sessions (HumanEvents_waits_abc123, keeper_HumanEvents_blocking) Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEvents.sql | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/sp_HumanEvents/sp_HumanEvents.sql b/sp_HumanEvents/sp_HumanEvents.sql index 6f1b888d..ea31dd02 100644 --- a/sp_HumanEvents/sp_HumanEvents.sql +++ b/sp_HumanEvents/sp_HumanEvents.sql @@ -4831,7 +4831,22 @@ BEGIN SET @executer = QUOTENAME(@output_database_name) + N'.sys.sp_executesql '; - /*Clean up sessions*/ + /* + Clean up sessions. Match only what sp_HumanEvents itself creates: + HumanEvents_<event>_<id> (one-shot, @keep_alive = 0) + keeper_HumanEvents_<event> (@keep_alive = 1) + + Previous pattern N'%HumanEvents_%' had two issues: + - unanchored leading % — a user session named "MyHumanEventsFoo" + would match and get dropped. + - unescaped _ — LIKE treats _ as a single-char wildcard, so + "HumanEventsMonitor" (no literal underscore) would match via the + trailing % + the _ wildcard eating any one character. + + Anchored to the prefix and escaped the literal underscore with a + bracket class so an operator using HumanEvents-adjacent names for + their own XE sessions isn't collateral damage. 
+ */ IF @azure = 0 BEGIN SELECT @@ -4843,7 +4858,8 @@ BEGIN FROM sys.server_event_sessions AS ses LEFT JOIN sys.dm_xe_sessions AS dxs ON dxs.name = ses.name - WHERE ses.name LIKE N'%HumanEvents_%'; + WHERE ses.name LIKE N'HumanEvents[_]%' + OR ses.name LIKE N'keeper[_]HumanEvents[_]%'; END; ELSE BEGIN @@ -4856,7 +4872,8 @@ BEGIN FROM sys.database_event_sessions AS ses LEFT JOIN sys.dm_xe_database_sessions AS dxs ON dxs.name = ses.name - WHERE ses.name LIKE N'%HumanEvents_%'; + WHERE ses.name LIKE N'HumanEvents[_]%' + OR ses.name LIKE N'keeper[_]HumanEvents[_]%'; END; EXECUTE sys.sp_executesql From b7add5134b5e4efe0093307cc113ecf7dd522168 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 17:59:38 -0400 Subject: [PATCH 17/52] Weight sp_QuickieStore regression comparator by count_executions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The regression_metric_average and current_metric_average SELECTs built their per-unit-work averages with AVG(qsrs.avg_cpu_time), AVG(qsrs.avg_duration), etc. — but qsrs.avg_* are themselves per-interval averages, so AVG over them is an unweighted mean of means. An interval with 1 execution at 1000 ms gets the same pull on the number as an interval with 10,000 executions at 10 ms. Regression detection then flags sparse outliers as movement, and misses real load changes that play out across many intervals with similar avg. Changed the per-unit-work cases (cpu, logical/physical reads, writes, duration, memory, tempdb, rows) in both the baseline and current SELECTs to: SUM(qsrs.avg_metric * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0) Left alone: - 'total *' cases — already SUM(avg * count), which matches intent. - 'executions' — qsrs.count_executions is a count, not an average, so plain AVG per interval is meaningful. 
- wait branch — waits.total_query_wait_time_ms is already a per-interval total, so AVG across intervals is the right shape. Verified sp_QuickieStore installs clean and runs against PerformanceMonitor on SQL Server 2022 with @regression_baseline_start_date/@regression_comparator = 'relative' /@regression_direction = 'regressed' without errors. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_QuickieStore/sp_QuickieStore.sql | 51 ++++++++++++++++++----------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/sp_QuickieStore/sp_QuickieStore.sql b/sp_QuickieStore/sp_QuickieStore.sql index cf553674..b6e0627d 100644 --- a/sp_QuickieStore/sp_QuickieStore.sql +++ b/sp_QuickieStore/sp_QuickieStore.sql @@ -8873,22 +8873,29 @@ BEGIN @sql += N' SELECT qsq.query_hash, - /* All of these but count_executions are already floats. */ + /* All of these but count_executions are already floats. + qsrs.avg_* columns are themselves per-interval averages, so + AVG(avg_*) is an unweighted mean of means. Weight by + count_executions to get the true cross-interval average — + otherwise intervals with very few executions get the same + pull on the number as intervals with many, and regression + detection skews toward sparse outlier intervals. 
*/ regression_metric_average = CONVERT ( float, ' + CASE @sort_order - WHEN 'cpu' THEN N'AVG(qsrs.avg_cpu_time)' - WHEN 'logical reads' THEN N'AVG(qsrs.avg_logical_io_reads)' - WHEN 'physical reads' THEN N'AVG(qsrs.avg_physical_io_reads)' - WHEN 'writes' THEN N'AVG(qsrs.avg_logical_io_writes)' - WHEN 'duration' THEN N'AVG(qsrs.avg_duration)' - WHEN 'memory' THEN N'AVG(qsrs.avg_query_max_used_memory)' - WHEN 'tempdb' THEN CASE WHEN @new = 1 THEN N'AVG(qsrs.avg_tempdb_space_used)' ELSE N'AVG(qsrs.avg_cpu_time)' END + WHEN 'cpu' THEN N'SUM(qsrs.avg_cpu_time * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'logical reads' THEN N'SUM(qsrs.avg_logical_io_reads * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'physical reads' THEN N'SUM(qsrs.avg_physical_io_reads * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'writes' THEN N'SUM(qsrs.avg_logical_io_writes * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'duration' THEN N'SUM(qsrs.avg_duration * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'memory' THEN N'SUM(qsrs.avg_query_max_used_memory * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'tempdb' THEN CASE WHEN @new = 1 THEN N'SUM(qsrs.avg_tempdb_space_used * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' ELSE N'SUM(qsrs.avg_cpu_time * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' END + /* count_executions per interval is meaningful as a plain mean — it''s a count, not an average-of-averages. 
*/ WHEN 'executions' THEN N'AVG(qsrs.count_executions)' - WHEN 'rows' THEN N'AVG(qsrs.avg_rowcount)' + WHEN 'rows' THEN N'SUM(qsrs.avg_rowcount * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' WHEN 'total cpu' THEN N'SUM(qsrs.avg_cpu_time * qsrs.count_executions)' WHEN 'total logical reads' THEN N'SUM(qsrs.avg_logical_io_reads * qsrs.count_executions)' WHEN 'total physical reads' THEN N'SUM(qsrs.avg_physical_io_reads * qsrs.count_executions)' @@ -8897,7 +8904,8 @@ BEGIN WHEN 'total memory' THEN N'SUM(qsrs.avg_query_max_used_memory * qsrs.count_executions)' WHEN 'total tempdb' THEN CASE WHEN @new = 1 THEN N'SUM(qsrs.avg_tempdb_space_used * qsrs.count_executions)' ELSE N'SUM(qsrs.avg_cpu_time * qsrs.count_executions)' END WHEN 'total rows' THEN N'SUM(qsrs.avg_rowcount * qsrs.count_executions)' - ELSE CASE WHEN @sort_order_is_a_wait = 1 THEN N'AVG(waits.total_query_wait_time_ms)' ELSE N'AVG(qsrs.avg_cpu_time)' END + /* Waits and the fallback path — waits are per-interval totals so AVG is correct; fallback mirrors cpu path. */ + ELSE CASE WHEN @sort_order_is_a_wait = 1 THEN N'AVG(waits.total_query_wait_time_ms)' ELSE N'SUM(qsrs.avg_cpu_time * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' END END + N' ) @@ -8985,22 +8993,25 @@ BEGIN @sql += N' SELECT qsq.query_hash, - /* All of these but count_executions are already floats. */ + /* All of these but count_executions are already floats. + Weighted by count_executions so the current-window average + matches the baseline-window computation (see baseline block + above) and regression percentages compare like with like. 
*/ current_metric_average = CONVERT ( float, ' + CASE @sort_order - WHEN 'cpu' THEN N'AVG(qsrs.avg_cpu_time)' - WHEN 'logical reads' THEN N'AVG(qsrs.avg_logical_io_reads)' - WHEN 'physical reads' THEN N'AVG(qsrs.avg_physical_io_reads)' - WHEN 'writes' THEN N'AVG(qsrs.avg_logical_io_writes)' - WHEN 'duration' THEN N'AVG(qsrs.avg_duration)' - WHEN 'memory' THEN N'AVG(qsrs.avg_query_max_used_memory)' - WHEN 'tempdb' THEN CASE WHEN @new = 1 THEN N'AVG(qsrs.avg_tempdb_space_used)' ELSE N'AVG(qsrs.avg_cpu_time)' END + WHEN 'cpu' THEN N'SUM(qsrs.avg_cpu_time * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'logical reads' THEN N'SUM(qsrs.avg_logical_io_reads * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'physical reads' THEN N'SUM(qsrs.avg_physical_io_reads * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'writes' THEN N'SUM(qsrs.avg_logical_io_writes * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'duration' THEN N'SUM(qsrs.avg_duration * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'memory' THEN N'SUM(qsrs.avg_query_max_used_memory * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' + WHEN 'tempdb' THEN CASE WHEN @new = 1 THEN N'SUM(qsrs.avg_tempdb_space_used * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' ELSE N'SUM(qsrs.avg_cpu_time * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' END WHEN 'executions' THEN N'AVG(qsrs.count_executions)' - WHEN 'rows' THEN N'AVG(qsrs.avg_rowcount)' + WHEN 'rows' THEN N'SUM(qsrs.avg_rowcount * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' WHEN 'total cpu' THEN N'SUM(qsrs.avg_cpu_time * qsrs.count_executions)' WHEN 'total logical reads' THEN N'SUM(qsrs.avg_logical_io_reads * qsrs.count_executions)' WHEN 
'total physical reads' THEN N'SUM(qsrs.avg_physical_io_reads * qsrs.count_executions)' @@ -9009,7 +9020,7 @@ BEGIN WHEN 'total memory' THEN N'SUM(qsrs.avg_query_max_used_memory * qsrs.count_executions)' WHEN 'total tempdb' THEN CASE WHEN @new = 1 THEN N'SUM(qsrs.avg_tempdb_space_used * qsrs.count_executions)' ELSE N'SUM(qsrs.avg_cpu_time * qsrs.count_executions)' END WHEN 'total rows' THEN N'SUM(qsrs.avg_rowcount * qsrs.count_executions)' - ELSE CASE WHEN @sort_order_is_a_wait = 1 THEN N'AVG(waits.total_query_wait_time_ms)' ELSE N'AVG(qsrs.avg_cpu_time)' END + ELSE CASE WHEN @sort_order_is_a_wait = 1 THEN N'AVG(waits.total_query_wait_time_ms)' ELSE N'SUM(qsrs.avg_cpu_time * qsrs.count_executions) / NULLIF(SUM(CONVERT(float, qsrs.count_executions)), 0)' END END + N' ) From 48315e84d088417b54a6835bbdfbac34c775ef94 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 18:03:05 -0400 Subject: [PATCH 18/52] Add @pause_milliseconds batch pacing to sp_QueryStoreCleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The removal cursor calls sys.sp_query_store_remove_query once per query_id back-to-back. On a bulk purge of tens of thousands of queries that hammers Query Store's internal writes and blocks concurrent QS writers without relief. Added @pause_milliseconds (integer, default 0). When > 0, the cursor sleeps that many ms after each remove via WAITFOR DELAY. Clamped to [0, 60000] so a fat-finger doesn't produce a multi-hour run. The delay string is built once per execution as hh:mm:ss.mmm (style 114) from DATEADD(MILLISECOND, @pause_milliseconds, '00:00:00') — verified both 100 and 60000 render correctly and WAITFOR accepts the string. @pause_milliseconds = 0 (default) preserves the existing no-sleep behavior. Help text and parameter metadata updated to match. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql b/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql index 6e3d1384..8d578741 100644 --- a/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql +++ b/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql @@ -40,6 +40,7 @@ ALTER PROCEDURE @dedupe_by varchar(50) = 'all', /*deduplication strategy: all, query_hash, plan_hash, none*/ @min_age_days integer = NULL, /*only remove queries not executed in this many days*/ @report_only bit = 0, /*1 = report what would be removed without removing*/ + @pause_milliseconds integer = 0, /*sleep this many ms between each remove to pace log writes on bulk purges; 0 = no sleep (default)*/ @debug bit = 0, /*prints dynamic sql and diagnostics*/ @help bit = 0, /*prints help information*/ @version varchar(30) = NULL OUTPUT, /*OUTPUT; for support*/ @@ -96,6 +97,8 @@ BEGIN THEN 'only remove queries whose last execution is older than this many days; NULL = no age filter' WHEN N'@report_only' THEN 'report what would be removed without removing' + WHEN N'@pause_milliseconds' + THEN 'sleep this many ms between each sp_query_store_remove_query call so a bulk purge does not hammer the log; 0 = no sleep' WHEN N'@debug' THEN 'prints dynamic sql and diagnostics' WHEN N'@help' @@ -120,6 +123,8 @@ BEGIN THEN 'any positive integer, e.g. 7, 30, 90' WHEN N'@report_only' THEN '0 or 1' + WHEN N'@pause_milliseconds' + THEN 'any non-negative integer, e.g. 
25, 100, 500' WHEN N'@debug' THEN '0 or 1' WHEN N'@help' @@ -144,6 +149,8 @@ BEGIN THEN 'NULL; no age filter' WHEN N'@report_only' THEN '0' + WHEN N'@pause_milliseconds' + THEN '0' WHEN N'@debug' THEN '0' WHEN N'@help' @@ -939,6 +946,35 @@ OPTION(RECOMPILE);'; FROM @c INTO @query_id; + /* + Precompute the WAITFOR DELAY string once — WAITFOR's argument has to + be a varchar literal or variable, so we convert @pause_milliseconds + to hh:mm:ss.mmm format up front. Clamp to [0, 60000] to avoid + surprising 10-minute sleeps from a fat-finger and negative values + that would raise at the WAITFOR. + */ + DECLARE + @pause_delay varchar(12) = N''; + + IF @pause_milliseconds IS NOT NULL + AND @pause_milliseconds > 0 + BEGIN + IF @pause_milliseconds > 60000 + BEGIN + SELECT + @pause_milliseconds = 60000; + END; + + SELECT + @pause_delay = + CONVERT + ( + varchar(12), + DATEADD(MILLISECOND, @pause_milliseconds, CONVERT(time(3), '00:00:00')), + 114 + ); + END; + WHILE @@FETCH_STATUS = 0 BEGIN SELECT @@ -963,6 +999,11 @@ OPTION(RECOMPILE);'; RAISERROR('Query %I64d of %I64d: query_id %I64d not removed (%s)', 0, 1, @current, @total, @query_id, @error_message) WITH NOWAIT; END CATCH; + IF @pause_delay <> N'' + BEGIN + WAITFOR DELAY @pause_delay; + END; + FETCH NEXT FROM @c INTO @query_id; From c460316841e668a99a10efbbd0aecaf96ffaa053 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 18:14:41 -0400 Subject: [PATCH 19/52] Honor @pending_task_threshold in sp_HealthParser scheduler shreds @pending_task_threshold documents the minimum pending-task count the caller wants to see, but two shreds silently ignored it: 1. #scheduler_details WHERE clause (line ~2954): AND (... pendingTasks[.>= sql:variable("@pending_task_threshold")] ... 
OR @warnings_only = 0) The `OR @warnings_only = 0` short-circuit meant that whenever the user ran in the default @warnings_only = 0 mode, the threshold predicate evaluated to TRUE regardless of the pending-task value. Setting @pending_task_threshold = 100 returned rows with 2 pending tasks just the same. 2. #pending_task_details CROSS APPLY (line ~3179): ... queryProcessing[@pendingTasks > 1] ... Hardcoded literal 1, again with no reference to the parameter. Both now use sql:variable("@pending_task_threshold") unconditionally. @warnings_only still independently controls the WARNING-state filter. Verified the sproc installs clean and @what_to_check = 'scheduler' with @pending_task_threshold = 100 runs without errors against the system_health session on SQL Server 2022. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HealthParser/sp_HealthParser.sql | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/sp_HealthParser/sp_HealthParser.sql b/sp_HealthParser/sp_HealthParser.sql index 61658ff1..c2cf3883 100644 --- a/sp_HealthParser/sp_HealthParser.sql +++ b/sp_HealthParser/sp_HealthParser.sql @@ -2951,7 +2951,11 @@ AND ca.utc_timestamp < @end_date'; CROSS APPLY wi.sp_server_diagnostics_component_result.nodes('/event') AS w(x) WHERE w.x.exist('(data[@name="component"]/text[.= "QUERY_PROCESSING"])') = 1 AND (w.x.exist('(data[@name="state"]/text[.= "WARNING"])') = @warnings_only OR @warnings_only = 0) - AND (w.x.exist('(/event/data[@name="data"]/value/queryProcessing/@pendingTasks[.>= sql:variable("@pending_task_threshold")])') = 1 OR @warnings_only = 0) + /* Threshold is honored whether or not @warnings_only is set — the + parameter documents "minimum pending tasks to display" and the + previous `OR @warnings_only = 0` short-circuit silently ignored + the user-supplied value whenever warnings-only was off. 
*/ + AND w.x.exist('(/event/data[@name="data"]/value/queryProcessing/@pendingTasks[.>= sql:variable("@pending_task_threshold")])') = 1 OPTION(RECOMPILE, MAXDOP 1); IF @debug = 1 @@ -3176,7 +3180,10 @@ AND ca.utc_timestamp < @end_date'; INTO #pending_task_details FROM #sp_server_diagnostics_component_result AS wi CROSS APPLY wi.sp_server_diagnostics_component_result.nodes('/event') AS w(x) - CROSS APPLY w.x.nodes('/event/data[@name="data"]/value/queryProcessing[@pendingTasks > 1]/pendingTasks/entryPoint') AS ep(e) + /* Hardcoded threshold > 1 ignored the @pending_task_threshold + parameter. Replaced with sql:variable() binding so the user's + value actually takes effect here too. */ + CROSS APPLY w.x.nodes('/event/data[@name="data"]/value/queryProcessing[@pendingTasks >= sql:variable("@pending_task_threshold")]/pendingTasks/entryPoint') AS ep(e) WHERE w.x.exist('(data[@name="component"]/text[.= "QUERY_PROCESSING"])') = 1 AND (w.x.exist('(data[@name="state"]/text[.= "WARNING"])') = @warnings_only OR @warnings_only = 0) OPTION(RECOMPILE, MAXDOP 1); From dc6e56dfa2c503cb92f69e6f0459e1211a379e6c Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 18:17:29 -0400 Subject: [PATCH 20/52] Use AND between @dbid and @database_name filter groups in sp_HealthParser MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The deadlock process-list filter combined two "pass-through if NULL" predicate pairs with OR between them: WHERE (x.database_id = @dbid OR @dbid IS NULL) OR (x.current_database_name = @db OR @db IS NULL) which means "match if either pair's parameter is NULL" — so whenever one of the two was unsupplied (or both), the whole filter short-circuited to TRUE, and every row passed regardless of the other parameter's value. The intent is AND: both filters apply independently when their parameter is supplied, and each individually passes through when NULL. 
Currently masked in practice because an earlier validation block aborts the proc when @database_name is set but @dbid couldn't be resolved, keeping the two parameters in lockstep. But the shape was broken and would surface the moment that validation relaxed or another caller supplied mismatched inputs. Verified sp_HealthParser installs clean and @what_to_check = 'deadlocks' @database_name = N'PerformanceMonitor' runs against system_health without errors on SQL Server 2022. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HealthParser/sp_HealthParser.sql | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/sp_HealthParser/sp_HealthParser.sql b/sp_HealthParser/sp_HealthParser.sql index c2cf3883..43578b75 100644 --- a/sp_HealthParser/sp_HealthParser.sql +++ b/sp_HealthParser/sp_HealthParser.sql @@ -5728,9 +5728,16 @@ AND ca.utc_timestamp < @end_date'; FROM #deadlocks AS d CROSS APPLY d.xml_deadlock_report.nodes('//deadlock/process-list/process') AS e(x) ) AS x + /* Standard "filter if supplied, pass-through if NULL" predicate + pairs must be combined with AND between the groups — OR let + rows through whenever either parameter was NULL, which makes + the @database_name/@dbid filter loose whenever only one side + was supplied. Currently masked because the validation block + above aborts when the two disagree, but the shape was + wrong and would break if that validation ever relaxed. 
*/ WHERE (x.database_id = @dbid OR @dbid IS NULL) - OR (x.current_database_name = @database_name + AND (x.current_database_name = @database_name OR @database_name IS NULL) OPTION(RECOMPILE, MAXDOP 1); From ba3bbc213219a7ef39538e9996299b4894cdc2cf Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 18:19:05 -0400 Subject: [PATCH 21/52] Batch sp_HealthParser log-retention DELETEs in 10k-row chunks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The log-retention path issued 14 separate unbatched DELETE FROM WHERE collection_time < @cleanup_date statements back to back in a single sp_executesql batch. On an instance that's been running for months with collection enabled but cleanup never executed, any one of those tables could hold millions of rows, and an unbatched DELETE takes a proportional transaction-log hit plus a high chance of lock escalation blocking concurrent writers. Wrapped each DELETE in a WHILE loop using DELETE TOP (10000) with an IF @@ROWCOUNT < 10000 BREAK exit so transactions stay small and the engine can release locks between batches. No pacing / WAITFOR added — callers who want throttling can add it externally. Verified the sproc installs clean and runs @what_to_check = 'waits' against system_health without errors on SQL Server 2022. The cleanup branch itself only fires when log tables exist; the structural change to the dynamic SQL parses correctly. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HealthParser/sp_HealthParser.sql | 65 ++++++++++------------------- 1 file changed, 23 insertions(+), 42 deletions(-) diff --git a/sp_HealthParser/sp_HealthParser.sql b/sp_HealthParser/sp_HealthParser.sql index 43578b75..fa7dc779 100644 --- a/sp_HealthParser/sp_HealthParser.sql +++ b/sp_HealthParser/sp_HealthParser.sql @@ -1259,49 +1259,30 @@ AND ca.utc_timestamp < @end_date'; SYSDATETIME() ); - /* Clean up each log table */ + /* + Clean up each log table in batches of 10,000 rows. A single + unbatched DELETE against a long-installed instance that + hasn't been cleaned up will take a large transaction-log + hit and potentially escalate to table-level locking. The + WHILE loop keeps each transaction small and lets other + writers progress between batches. @@ROWCOUNT < 10000 is + the loop exit condition once the trailing batch finishes. + */ SET @dsql = N' - DELETE FROM ' + @log_table_significant_waits + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_waits_by_count + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_waits_by_duration + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_io_issues + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_cpu_tasks + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_memory_conditions + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_memory_broker + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_memory_node_oom + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_system_health + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_scheduler_issues + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_severe_errors + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_pending_tasks + ' - WHERE collection_time < 
@cleanup_date; - - DELETE FROM ' + @log_table_blocking + ' - WHERE collection_time < @cleanup_date; - - DELETE FROM ' + @log_table_deadlocks + ' - WHERE collection_time < @cleanup_date; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_significant_waits + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_waits_by_count + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_waits_by_duration + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_io_issues + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_cpu_tasks + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_memory_conditions + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_memory_broker + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_memory_node_oom + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_system_health + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_scheduler_issues + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_severe_errors + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_pending_tasks + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + 
WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_blocking + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_deadlocks + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; '; IF @debug = 1 From e4b081f78fd34838502a30d1be7cca516466b3b6 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 18:21:29 -0400 Subject: [PATCH 22/52] Weight sp_HealthParser #tc wait average by waits count MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The #tc aggregation rolls up #topwaits_count rows into wait_type / rounded-time buckets. It summed the waits count correctly but took AVG(tc.average_wait_time_ms) — an unweighted mean of already-averaged per-event values. An event that contributed a single wait got the same pull on the bucket's output as an event with thousands of waits, so the displayed "average wait" skewed toward sparse outlier events. Changed to a weighted average: SUM(avg * waits) / NULLIF(SUM(waits), 0) with CONVERT(decimal(38,2)) on the operands to avoid bigint multiplication overflow on high-volume waits, and NULLIF to keep the expression well-defined if every contributing row has waits = 0. Result is CONVERT(bigint, ...) to preserve the existing output type. Left #td alone — its GROUP BY includes the metric columns themselves, so that block is effectively a DISTINCT rather than an aggregation, and is paired with a downstream ROW_NUMBER() dedupe step. Different shape, different concern. Verified the sproc installs clean and @what_to_check = 'waits' against system_health runs without errors on SQL Server 2022. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HealthParser/sp_HealthParser.sql | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/sp_HealthParser/sp_HealthParser.sql b/sp_HealthParser/sp_HealthParser.sql index fa7dc779..825f2c83 100644 --- a/sp_HealthParser/sp_HealthParser.sql +++ b/sp_HealthParser/sp_HealthParser.sql @@ -2199,7 +2199,21 @@ AND ca.utc_timestamp < @end_date'; ), tc.wait_type, waits = SUM(CONVERT(bigint, tc.waits)), - average_wait_time_ms = CONVERT(bigint, AVG(tc.average_wait_time_ms)), + /* + Weighted average rather than AVG(avg): tc.average_wait_time_ms + is already a per-event average, so AVG() over the bucket was + an unweighted mean of means — events with one wait got the + same pull on the output as events with thousands. Weight by + waits to get the true bucket-scoped average. NULLIF keeps us + safe if every contributing row had waits = 0. + */ + average_wait_time_ms = + CONVERT + ( + bigint, + SUM(CONVERT(decimal(38, 2), tc.average_wait_time_ms) * CONVERT(decimal(38, 2), tc.waits)) + / NULLIF(SUM(CONVERT(decimal(38, 2), tc.waits)), 0) + ), max_wait_time_ms = CONVERT(bigint, MAX(tc.max_wait_time_ms)) INTO #tc FROM #topwaits_count AS tc From db7a0b8d3bc3271f5dac53f1e3080cb0abd3d6d1 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 18:24:06 -0400 Subject: [PATCH 23/52] Align sp_HealthParser 2017+ XE time filter to half-open interval MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The pre-2017 XML-shred branch used a half-open time filter: WHERE ca.utc_timestamp >= @start_date AND ca.utc_timestamp < @end_date The 2017+ branch used BETWEEN, which is a closed interval — an event captured at exactly @end_date was included on 2017+ and excluded on pre-2017. Same inputs, same data, different results depending on which branch ran. 
Rewrote the 2017+ @time_filter to use explicit `>= AND <` so the boundary semantics match across versions. Half-open is the more common convention for time windows in this sproc and avoids double-counting when callers chain windows end-to-end. Verified sp_HealthParser installs clean and @what_to_check = 'waits' runs without errors on SQL Server 2022 (a 2017+ build). Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HealthParser/sp_HealthParser.sql | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/sp_HealthParser/sp_HealthParser.sql b/sp_HealthParser/sp_HealthParser.sql index 825f2c83..39a17924 100644 --- a/sp_HealthParser/sp_HealthParser.sql +++ b/sp_HealthParser/sp_HealthParser.sql @@ -435,15 +435,24 @@ AND ca.utc_timestamp < @end_date'; END; ELSE BEGIN - /* 2017+ handling */ + /* + 2017+ handling. Use the same half-open (>= @start_date AND + < @end_date) shape as the pre-2017 branch so an event captured + at exactly @end_date is not included on 2017+ while excluded + on pre-2017 — previously BETWEEN meant a closed interval on + 2017+ and a row at the boundary could appear or not depending + on which branch ran. 
+ */ SET @cross_apply = N'CROSS APPLY xml.{object_name}.nodes(''/event'') AS e(x)'; IF @timestamp_utc_mode = 1 SET @time_filter = N' - AND CONVERT(datetimeoffset(7), fx.timestamp_utc) BETWEEN @start_date AND @end_date'; + AND CONVERT(datetimeoffset(7), fx.timestamp_utc) >= @start_date + AND CONVERT(datetimeoffset(7), fx.timestamp_utc) < @end_date'; ELSE SET @time_filter = N' - AND fx.timestamp_utc BETWEEN @start_date AND @end_date'; + AND fx.timestamp_utc >= @start_date + AND fx.timestamp_utc < @end_date'; END; SET @sql_template = From f594e1237b2e87159f7c0c9da772632f6e776727 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 18:39:37 -0400 Subject: [PATCH 24/52] Fix sp_LogHunter custom_message_only validation and canary date floor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two related bugs in the setup path: 1. @custom_message_only = 1 with NULL/empty @custom_message produced an empty #search. The canary and canned-search inserts are gated on @custom_message_only = 0, so they skipped; the custom insert is gated on @custom_message LIKE N'_%' which is UNKNOWN (treated as FALSE) when @custom_message is NULL, so it also skipped. The outer cursor ran over nothing and the sproc returned "no results" on what should have been an invalid input. Added an early validation that RAISERRORs a clear message instead. 2. Canary searches built `days_back` as a "90 days ago at minimum" floor via CASE WHEN @days_back > -90 THEN -90 ELSE @days_back END. When the caller passes @start_date/@end_date instead of @days_back, the setup code NULLs @days_back — and CASE over NULL collapses to NULL, the CONVERT→DATEADD produces NULL, and the final concatenated literal becomes the string "NULL" (or NULLs all the way out), which xp_readerrorlog rejects. Switched to a CASE WHEN @days_back IS NOT NULL fallback that substitutes @start_date when date-range mode is active. 
Verified on SQL Server 2022: @custom_message_only = 1 (no message) → expected error raised @start_date / @end_date set, @days_back NULL → canary rows build with a real date floor and xp_readerrorlog runs without error Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_LogHunter/sp_LogHunter.sql | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/sp_LogHunter/sp_LogHunter.sql b/sp_LogHunter/sp_LogHunter.sql index 93af4b18..10001bea 100644 --- a/sp_LogHunter/sp_LogHunter.sql +++ b/sp_LogHunter/sp_LogHunter.sql @@ -241,6 +241,21 @@ BEGIN @custom_message_only = 0; END; + /* + @custom_message_only = 1 means "skip the canned search strings and + look only for the user-supplied @custom_message". Without a message + to search for, every insert branch below would skip (the custom + insert is gated on @custom_message LIKE N'_%', which is NULL/false + for NULL or empty input), leaving #search empty and the whole + outer loop a no-op. Reject the combination up front. + */ + IF @custom_message_only = 1 + AND (@custom_message IS NULL OR LEN(@custom_message) = 0) + BEGIN + RAISERROR(N'@custom_message_only = 1 requires a non-empty @custom_message. Provide a search string or set @custom_message_only = 0.', 11, 1) WITH NOWAIT; + RETURN; + END; + /*Fix @end_date*/ IF @start_date IS NOT NULL AND @end_date IS NULL @@ -412,8 +427,24 @@ BEGIN CROSS JOIN ( SELECT + /* + Canary floor is normally "at least 90 days back" so these + server-identity strings are found regardless of how recent + the caller is interested in. When the caller supplied + @start_date/@end_date, @days_back is NULL at this point — + the previous CASE collapsed to NULL, produced a NULL + days_back literal, and xp_readerrorlog received NULL as a + date argument and errored. Fall back to @start_date in + date-range mode so the canary has a concrete floor. 
+ */ days_back = - N'"' + CONVERT(nvarchar(10), DATEADD(DAY, CASE WHEN @days_back > -90 THEN -90 ELSE @days_back END, SYSDATETIME()), 112) + N'"', + N'"' + + CASE + WHEN @days_back IS NOT NULL + THEN CONVERT(nvarchar(10), DATEADD(DAY, CASE WHEN @days_back > -90 THEN -90 ELSE @days_back END, SYSDATETIME()), 112) + ELSE CONVERT(nvarchar(10), @start_date, 112) + END + + N'"', start_date = N'"' + CONVERT(nvarchar(30), @start_date) + N'"', end_date = From 35399ceff669e52f032dd633e8b43f6e79988276 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 19:03:40 -0400 Subject: [PATCH 25/52] Revert "Merge fix/healthparser-cleanup-delete-batch into dev" This reverts commit 8d54a5d2e762145e68fcee367ca02d47e50bcecf, reversing changes made to 2f23916a9bbb8f058736073507a01d87acc1c085. --- sp_HealthParser/sp_HealthParser.sql | 65 +++++++++++++++++++---------- 1 file changed, 42 insertions(+), 23 deletions(-) diff --git a/sp_HealthParser/sp_HealthParser.sql b/sp_HealthParser/sp_HealthParser.sql index 39a17924..948abcb4 100644 --- a/sp_HealthParser/sp_HealthParser.sql +++ b/sp_HealthParser/sp_HealthParser.sql @@ -1268,30 +1268,49 @@ AND ca.utc_timestamp < @end_date'; SYSDATETIME() ); - /* - Clean up each log table in batches of 10,000 rows. A single - unbatched DELETE against a long-installed instance that - hasn't been cleaned up will take a large transaction-log - hit and potentially escalate to table-level locking. The - WHILE loop keeps each transaction small and lets other - writers progress between batches. @@ROWCOUNT < 10000 is - the loop exit condition once the trailing batch finishes. 
- */ + /* Clean up each log table */ SET @dsql = N' - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_significant_waits + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_waits_by_count + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_waits_by_duration + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_io_issues + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_cpu_tasks + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_memory_conditions + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_memory_broker + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_memory_node_oom + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_system_health + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_scheduler_issues + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_severe_errors + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_pending_tasks + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_blocking + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 
BREAK; END; - WHILE 1 = 1 BEGIN DELETE TOP (10000) FROM ' + @log_table_deadlocks + ' WHERE collection_time < @cleanup_date; IF @@ROWCOUNT < 10000 BREAK; END; + DELETE FROM ' + @log_table_significant_waits + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_waits_by_count + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_waits_by_duration + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_io_issues + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_cpu_tasks + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_memory_conditions + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_memory_broker + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_memory_node_oom + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_system_health + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_scheduler_issues + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_severe_errors + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_pending_tasks + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_blocking + ' + WHERE collection_time < @cleanup_date; + + DELETE FROM ' + @log_table_deadlocks + ' + WHERE collection_time < @cleanup_date; '; IF @debug = 1 From 52b7c0c9b2d9d28f3789e7dccb5c5587adb3bd4c Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 19:04:58 -0400 Subject: [PATCH 26/52] Revert "Merge fix/query-store-cleanup-batch-pacing into dev" This reverts commit 7a9919fcfbc1ddffd9741faf8137051e81f982cb, reversing changes made to 1057e2b0065ec5207a4efab361f050f22216901b. 
--- sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql | 41 ------------------- 1 file changed, 41 deletions(-) diff --git a/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql b/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql index 8d578741..6e3d1384 100644 --- a/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql +++ b/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql @@ -40,7 +40,6 @@ ALTER PROCEDURE @dedupe_by varchar(50) = 'all', /*deduplication strategy: all, query_hash, plan_hash, none*/ @min_age_days integer = NULL, /*only remove queries not executed in this many days*/ @report_only bit = 0, /*1 = report what would be removed without removing*/ - @pause_milliseconds integer = 0, /*sleep this many ms between each remove to pace log writes on bulk purges; 0 = no sleep (default)*/ @debug bit = 0, /*prints dynamic sql and diagnostics*/ @help bit = 0, /*prints help information*/ @version varchar(30) = NULL OUTPUT, /*OUTPUT; for support*/ @@ -97,8 +96,6 @@ BEGIN THEN 'only remove queries whose last execution is older than this many days; NULL = no age filter' WHEN N'@report_only' THEN 'report what would be removed without removing' - WHEN N'@pause_milliseconds' - THEN 'sleep this many ms between each sp_query_store_remove_query call so a bulk purge does not hammer the log; 0 = no sleep' WHEN N'@debug' THEN 'prints dynamic sql and diagnostics' WHEN N'@help' @@ -123,8 +120,6 @@ BEGIN THEN 'any positive integer, e.g. 7, 30, 90' WHEN N'@report_only' THEN '0 or 1' - WHEN N'@pause_milliseconds' - THEN 'any non-negative integer, e.g. 25, 100, 500' WHEN N'@debug' THEN '0 or 1' WHEN N'@help' @@ -149,8 +144,6 @@ BEGIN THEN 'NULL; no age filter' WHEN N'@report_only' THEN '0' - WHEN N'@pause_milliseconds' - THEN '0' WHEN N'@debug' THEN '0' WHEN N'@help' @@ -946,35 +939,6 @@ OPTION(RECOMPILE);'; FROM @c INTO @query_id; - /* - Precompute the WAITFOR DELAY string once — WAITFOR's argument has to - be a varchar literal or variable, so we convert @pause_milliseconds - to hh:mm:ss.mmm format up front. 
Clamp to [0, 60000] to avoid - surprising 10-minute sleeps from a fat-finger and negative values - that would raise at the WAITFOR. - */ - DECLARE - @pause_delay varchar(12) = N''; - - IF @pause_milliseconds IS NOT NULL - AND @pause_milliseconds > 0 - BEGIN - IF @pause_milliseconds > 60000 - BEGIN - SELECT - @pause_milliseconds = 60000; - END; - - SELECT - @pause_delay = - CONVERT - ( - varchar(12), - DATEADD(MILLISECOND, @pause_milliseconds, CONVERT(time(3), '00:00:00')), - 114 - ); - END; - WHILE @@FETCH_STATUS = 0 BEGIN SELECT @@ -999,11 +963,6 @@ OPTION(RECOMPILE);'; RAISERROR('Query %I64d of %I64d: query_id %I64d not removed (%s)', 0, 1, @current, @total, @query_id, @error_message) WITH NOWAIT; END CATCH; - IF @pause_delay <> N'' - BEGIN - WAITFOR DELAY @pause_delay; - END; - FETCH NEXT FROM @c INTO @query_id; From 5e62cfb157a54d077b643ab8beb1227b19deb32c Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 19:05:42 -0400 Subject: [PATCH 27/52] Revert "Merge fix/blockviewer-transaction-attribute-mismatch into dev" This reverts commit d7d74b0a17dbf8b346b2cfa9a5c9ced505eade7e, reversing changes made to ff9e5758cac7b665b5c16e9f7dec1969de31a027. --- sp_HumanEvents/sp_HumanEventsBlockViewer.sql | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql index d9ef5d38..03717dec 100644 --- a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql +++ b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql @@ -1928,11 +1928,7 @@ SELECT query_text_pre = bd.value('(process/inputbuf/text())[1]', 'nvarchar(max)'), wait_time = bd.value('(process/@waittime)[1]', 'bigint'), transaction_name = bd.value('(process/@transactionname)[1]', 'nvarchar(1024)'), - /* lastbatchstarted matches the other three read sites for this column - (including the blocking-side read 120 lines below that UNION ALLs with - this row). 
Previously this site read @lasttranstarted, leaving the same - column holding two different attributes on blocked vs blocking rows. */ - last_transaction_started = bd.value('(process/@lastbatchstarted)[1]', 'datetime2'), + last_transaction_started = bd.value('(process/@lasttranstarted)[1]', 'datetime2'), last_transaction_completed = bd.value('(process/@lastbatchcompleted)[1]', 'datetime2'), wait_resource = bd.value('(process/@waitresource)[1]', 'nvarchar(1024)'), lock_mode = bd.value('(process/@lockMode)[1]', 'nvarchar(10)'), From e46bbc1f48afc076998325fb35e276fc39bacc21 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 19:06:37 -0400 Subject: [PATCH 28/52] Revert "Merge fix/index-cleanup-chain-cycle-cap into dev" This reverts commit 2493a35f79a8c22c597cbb0ac9ebef94b419e9df, reversing changes made to 186c214a843abee7538809eddffee9abcc232de3. --- sp_IndexCleanup/sp_IndexCleanup.sql | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/sp_IndexCleanup/sp_IndexCleanup.sql b/sp_IndexCleanup/sp_IndexCleanup.sql index 4a46a88d..b0988499 100644 --- a/sp_IndexCleanup/sp_IndexCleanup.sql +++ b/sp_IndexCleanup/sp_IndexCleanup.sql @@ -3284,20 +3284,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* Resolve subset chains: if A -> B -> C, flatten so A -> C directly. Without this, includes from transitive subsets are lost because Rule 6 only collects includes from direct subsets of the final superset. */ - /* - Flatten subset chains (A → B → C, so A → C) one hop at a time. Rule 3 - only points at strictly wider indexes, which is a partial order, so a - cycle cannot form under current logic. Cap iterations anyway so a - future bug that introduces a cycle surfaces as a warning rather than - an infinite loop that eventually fills tempdb. A chain depth of 100 - would require 100 distinct indexes on a single table in a prefix - supersession chain — not realistic in practice. 
- */ DECLARE @chains_resolved bigint = 1; - DECLARE @chain_iterations integer = 0; WHILE @chains_resolved > 0 - AND @chain_iterations < 100 BEGIN UPDATE ia1 @@ -3315,12 +3304,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. AND ia1.target_index_name <> ia2.target_index_name; SET @chains_resolved = ROWCOUNT_BIG(); - SET @chain_iterations += 1; - END; - - IF @chain_iterations >= 100 - BEGIN - RAISERROR('sp_IndexCleanup chain resolution hit the 100-iteration cap. This should never happen under the current rules; a cycle in target_index_name likely indicates a bug introduced by a later rule change. The recommendations below may be inconsistent — please investigate before running them.', 16, 1) WITH NOWAIT; END; IF @debug = 1 From 3e0e6d1a08e955313dce75759f2f149d66a2ffff Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 19:07:34 -0400 Subject: [PATCH 29/52] Revert "Protect primary keys from sp_IndexCleanup Rule 2 and Rule 5 DISABLE" This reverts commit e9eaee88f7828751c29be8eaa16ea945e791ca32. --- sp_IndexCleanup/sp_IndexCleanup.sql | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/sp_IndexCleanup/sp_IndexCleanup.sql b/sp_IndexCleanup/sp_IndexCleanup.sql index b0988499..8b005a59 100644 --- a/sp_IndexCleanup/sp_IndexCleanup.sql +++ b/sp_IndexCleanup/sp_IndexCleanup.sql @@ -3115,20 +3115,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. AND ia1.exact_match_hash = ia2.exact_match_hash /* Exact match: keys + includes + filter */ WHERE ia1.consolidation_rule IS NULL /* Not already processed */ AND ia2.consolidation_rule IS NULL /* Not already processed */ - /* Exclude unique constraints and primary keys on the loser (ia1) side. - Rule 2 is only allowed to DISABLE a regular nonclustered index — never - a PK or UC, both of which back FK referential integrity and cannot be - safely disabled. 
UCs are still processed as Rule 7.5 targets; PKs are - off-limits entirely for disabling. The opposite permutation of this - pair (with the PK as ia2, the keeper) still runs and correctly - disables the non-PK duplicate. */ + /* Exclude unique constraints - we'll handle those separately in Rule 7 */ AND NOT EXISTS ( SELECT 1/0 - FROM #index_details AS id1_pk - WHERE id1_pk.index_hash = ia1.index_hash - AND (id1_pk.is_unique_constraint = 1 OR id1_pk.is_primary_key = 1) + FROM #index_details AS id1_uc + WHERE id1_uc.index_hash = ia1.index_hash + AND id1_uc.is_unique_constraint = 1 ) AND NOT EXISTS ( @@ -3403,17 +3397,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. AND ISNULL(ia1.included_columns, '') <> ISNULL(ia2.included_columns, '') /* Different includes */ WHERE ia1.consolidation_rule IS NULL /* Not already processed */ AND ia2.consolidation_rule IS NULL /* Not already processed */ - /* Exclude unique constraints and primary keys on the loser (ia1) side. - Same reasoning as Rule 2: Rule 5 may only DISABLE a regular NC, not a - PK or UC whose index backs FK referential integrity. UCs are still - processed via Rule 7.5; PKs must never be disabled. 
*/ + /* Exclude pairs where either one is a unique constraint (we'll handle those separately in Rule 7) */ AND NOT EXISTS ( SELECT 1/0 - FROM #index_details AS id1_pk - WHERE id1_pk.index_hash = ia1.index_hash - AND (id1_pk.is_unique_constraint = 1 OR id1_pk.is_primary_key = 1) + FROM #index_details AS id1_uc + WHERE id1_uc.index_hash = ia1.index_hash + AND id1_uc.is_unique_constraint = 1 ) AND NOT EXISTS ( From 3763df5587de321f15e7344655c373c66004c230 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 20:27:07 -0400 Subject: [PATCH 30/52] Correlate sample_sql_handle / sample_plan_handle to the same row in sp_QuickieCache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The procedure / function / trigger aggregation paths built sample_sql_handle and sample_plan_handle with independent MAX() calls inside a GROUP BY (database_id, object_id). Each MAX picks the lexicographically-highest value across the group, so the two handles could come from different underlying plan rows — producing a pair where the sql_handle's text doesn't match the plan_handle's XML when retrieved downstream. Erik already fixed this for the Statement path (see the post-INSERT UPDATE at line ~1146 with an in-code comment that calls out exactly this bug). Three other paths were missed. Restructured each SELECT to read the DMV once into a derived table that adds ROW_NUMBER() OVER (PARTITION BY database_id, object_id ORDER BY execution_count DESC), then in the outer aggregate use MAX(CASE WHEN n = 1 THEN ... END) to pull both handles from the same winner row. Single DMV scan, one sort, one aggregate — avoids the CROSS APPLY-per-group shape that nested-loops poorly on busy servers with many procs and many plans (same pull-once reasoning as sp_HumanEventsBlockViewer's #dm_exec_query_stats_sh cache). Smoke-tested: sproc installs clean and runs @top = 5 on SQL Server 2022 without errors. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_QuickieCache/sp_QuickieCache.sql | 234 +++++++++++++++++++--------- 1 file changed, 162 insertions(+), 72 deletions(-) diff --git a/sp_QuickieCache/sp_QuickieCache.sql b/sp_QuickieCache/sp_QuickieCache.sql index 49fa86f2..e2b35b39 100644 --- a/sp_QuickieCache/sp_QuickieCache.sql +++ b/sp_QuickieCache/sp_QuickieCache.sql @@ -1217,34 +1217,72 @@ OPTION(RECOMPILE, MAXDOP 1);'; sample_sql_handle, sample_plan_handle ) + /* + sample_sql_handle and sample_plan_handle previously used + MAX(ps.sql_handle) and MAX(ps.plan_handle) — each picked the + lexicographic max independently, so the two values could come + from different plan rows and produce a mismatched text/plan + pair when retrieved downstream. ROW_NUMBER() OVER + (PARTITION BY database_id, object_id ORDER BY execution_count + DESC) in a derived table, then MAX(CASE WHEN n = 1 THEN ...) + in the outer aggregate, pulls both handles from the SAME winner + row. Single DMV scan + one sort + one aggregate — much lighter + than CROSS APPLY-ing the DMV per group, which nested-loops + poorly on busy servers. + */ SELECT query_type = 'Procedure', - database_name = DB_NAME(ps.database_id), - object_name = OBJECT_SCHEMA_NAME(ps.object_id, ps.database_id) + N'.' 
+ OBJECT_NAME(ps.object_id, ps.database_id), - plan_count = COUNT_BIG(DISTINCT ps.plan_handle), - total_executions = SUM(ps.execution_count), - total_cpu_ms = SUM(ps.total_worker_time) / 1000.0, - total_duration_ms = SUM(ps.total_elapsed_time) / 1000.0, - total_logical_reads = SUM(ps.total_logical_reads), - total_logical_writes = SUM(ps.total_logical_writes), - total_physical_reads = SUM(ps.total_physical_reads), - oldest_plan_creation = MIN(ps.cached_time), - newest_plan_creation = MAX(ps.cached_time), - last_execution_time = MAX(ps.last_execution_time), - sample_sql_handle = MAX(ps.sql_handle), - sample_plan_handle = MAX(ps.plan_handle) - FROM sys.dm_exec_procedure_stats AS ps - WHERE ps.execution_count >= @minimum_execution_count - AND ps.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END - AND ps.database_id < 32761 - AND ps.database_id = ISNULL(@database_id, ps.database_id) - AND ps.cached_time >= ISNULL(@start_date, ps.cached_time) - AND ps.cached_time < ISNULL(@end_date, DATEADD(DAY, 1, ps.cached_time)) + database_name = DB_NAME(r.database_id), + object_name = OBJECT_SCHEMA_NAME(r.object_id, r.database_id) + N'.' 
+ OBJECT_NAME(r.object_id, r.database_id), + plan_count = COUNT_BIG(DISTINCT r.plan_handle), + total_executions = SUM(r.execution_count), + total_cpu_ms = SUM(r.total_worker_time) / 1000.0, + total_duration_ms = SUM(r.total_elapsed_time) / 1000.0, + total_logical_reads = SUM(r.total_logical_reads), + total_logical_writes = SUM(r.total_logical_writes), + total_physical_reads = SUM(r.total_physical_reads), + oldest_plan_creation = MIN(r.cached_time), + newest_plan_creation = MAX(r.cached_time), + last_execution_time = MAX(r.last_execution_time), + sample_sql_handle = MAX(CASE WHEN r.n = 1 THEN r.sql_handle END), + sample_plan_handle = MAX(CASE WHEN r.n = 1 THEN r.plan_handle END) + FROM + ( + SELECT + ps.database_id, + ps.object_id, + ps.plan_handle, + ps.sql_handle, + ps.execution_count, + ps.total_worker_time, + ps.total_elapsed_time, + ps.total_logical_reads, + ps.total_logical_writes, + ps.total_physical_reads, + ps.cached_time, + ps.last_execution_time, + n = + ROW_NUMBER() OVER + ( + PARTITION BY + ps.database_id, + ps.object_id + ORDER BY + ps.execution_count DESC + ) + FROM sys.dm_exec_procedure_stats AS ps + WHERE ps.execution_count >= @minimum_execution_count + AND ps.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END + AND ps.database_id < 32761 + AND ps.database_id = ISNULL(@database_id, ps.database_id) + AND ps.cached_time >= ISNULL(@start_date, ps.cached_time) + AND ps.cached_time < ISNULL(@end_date, DATEADD(DAY, 1, ps.cached_time)) + ) AS r GROUP BY - ps.database_id, - ps.object_id + r.database_id, + r.object_id HAVING - SUM(ps.execution_count) >= @minimum_execution_count + SUM(r.execution_count) >= @minimum_execution_count OPTION(RECOMPILE, MAXDOP 1); IF @debug = 1 @@ -1300,34 +1338,60 @@ WITH sample_sql_handle, sample_plan_handle ) +/* Same ROW_NUMBER + derived-table pattern as procedure path. 
*/ SELECT query_type = ''Function'', - database_name = DB_NAME(fs.database_id), - object_name = OBJECT_SCHEMA_NAME(fs.object_id, fs.database_id) + N''.'' + OBJECT_NAME(fs.object_id, fs.database_id), - plan_count = COUNT_BIG(DISTINCT fs.plan_handle), - total_executions = SUM(fs.execution_count), - total_cpu_ms = SUM(fs.total_worker_time) / 1000.0, - total_duration_ms = SUM(fs.total_elapsed_time) / 1000.0, - total_logical_reads = SUM(fs.total_logical_reads), - total_logical_writes = SUM(fs.total_logical_writes), - total_physical_reads = SUM(fs.total_physical_reads), - oldest_plan_creation = MIN(fs.cached_time), - newest_plan_creation = MAX(fs.cached_time), - last_execution_time = MAX(fs.last_execution_time), - sample_sql_handle = MAX(fs.sql_handle), - sample_plan_handle = MAX(fs.plan_handle) -FROM sys.dm_exec_function_stats AS fs -WHERE fs.execution_count >= @minimum_execution_count -AND fs.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END -AND fs.database_id < 32761 -AND fs.database_id = ISNULL(@database_id, fs.database_id) -AND fs.cached_time >= ISNULL(@start_date, fs.cached_time) -AND fs.cached_time < ISNULL(@end_date, DATEADD(DAY, 1, fs.cached_time)) + database_name = DB_NAME(r.database_id), + object_name = OBJECT_SCHEMA_NAME(r.object_id, r.database_id) + N''.'' + OBJECT_NAME(r.object_id, r.database_id), + plan_count = COUNT_BIG(DISTINCT r.plan_handle), + total_executions = SUM(r.execution_count), + total_cpu_ms = SUM(r.total_worker_time) / 1000.0, + total_duration_ms = SUM(r.total_elapsed_time) / 1000.0, + total_logical_reads = SUM(r.total_logical_reads), + total_logical_writes = SUM(r.total_logical_writes), + total_physical_reads = SUM(r.total_physical_reads), + oldest_plan_creation = MIN(r.cached_time), + newest_plan_creation = MAX(r.cached_time), + last_execution_time = MAX(r.last_execution_time), + sample_sql_handle = MAX(CASE WHEN r.n = 1 THEN r.sql_handle END), + sample_plan_handle = MAX(CASE WHEN r.n = 1 THEN r.plan_handle END) +FROM 
+( + SELECT + fs.database_id, + fs.object_id, + fs.plan_handle, + fs.sql_handle, + fs.execution_count, + fs.total_worker_time, + fs.total_elapsed_time, + fs.total_logical_reads, + fs.total_logical_writes, + fs.total_physical_reads, + fs.cached_time, + fs.last_execution_time, + n = + ROW_NUMBER() OVER + ( + PARTITION BY + fs.database_id, + fs.object_id + ORDER BY + fs.execution_count DESC + ) + FROM sys.dm_exec_function_stats AS fs + WHERE fs.execution_count >= @minimum_execution_count + AND fs.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END + AND fs.database_id < 32761 + AND fs.database_id = ISNULL(@database_id, fs.database_id) + AND fs.cached_time >= ISNULL(@start_date, fs.cached_time) + AND fs.cached_time < ISNULL(@end_date, DATEADD(DAY, 1, fs.cached_time)) +) AS r GROUP BY - fs.database_id, - fs.object_id + r.database_id, + r.object_id HAVING - SUM(fs.execution_count) >= @minimum_execution_count + SUM(r.execution_count) >= @minimum_execution_count OPTION(RECOMPILE, MAXDOP 1);'; EXECUTE sys.sp_executesql @@ -1380,34 +1444,60 @@ OPTION(RECOMPILE, MAXDOP 1);'; sample_sql_handle, sample_plan_handle ) + /* Same ROW_NUMBER + derived-table pattern as procedure/function paths. */ SELECT query_type = 'Trigger', - database_name = DB_NAME(ts.database_id), - object_name = OBJECT_SCHEMA_NAME(ts.object_id, ts.database_id) + N'.' 
+ OBJECT_NAME(ts.object_id, ts.database_id), - plan_count = COUNT_BIG(DISTINCT ts.plan_handle), - total_executions = SUM(ts.execution_count), - total_cpu_ms = SUM(ts.total_worker_time) / 1000.0, - total_duration_ms = SUM(ts.total_elapsed_time) / 1000.0, - total_logical_reads = SUM(ts.total_logical_reads), - total_logical_writes = SUM(ts.total_logical_writes), - total_physical_reads = SUM(ts.total_physical_reads), - oldest_plan_creation = MIN(ts.cached_time), - newest_plan_creation = MAX(ts.cached_time), - last_execution_time = MAX(ts.last_execution_time), - sample_sql_handle = MAX(ts.sql_handle), - sample_plan_handle = MAX(ts.plan_handle) - FROM sys.dm_exec_trigger_stats AS ts - WHERE ts.execution_count >= @minimum_execution_count - AND ts.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END - AND ts.database_id < 32761 - AND ts.database_id = ISNULL(@database_id, ts.database_id) - AND ts.cached_time >= ISNULL(@start_date, ts.cached_time) - AND ts.cached_time < ISNULL(@end_date, DATEADD(DAY, 1, ts.cached_time)) + database_name = DB_NAME(r.database_id), + object_name = OBJECT_SCHEMA_NAME(r.object_id, r.database_id) + N'.' 
+ OBJECT_NAME(r.object_id, r.database_id), + plan_count = COUNT_BIG(DISTINCT r.plan_handle), + total_executions = SUM(r.execution_count), + total_cpu_ms = SUM(r.total_worker_time) / 1000.0, + total_duration_ms = SUM(r.total_elapsed_time) / 1000.0, + total_logical_reads = SUM(r.total_logical_reads), + total_logical_writes = SUM(r.total_logical_writes), + total_physical_reads = SUM(r.total_physical_reads), + oldest_plan_creation = MIN(r.cached_time), + newest_plan_creation = MAX(r.cached_time), + last_execution_time = MAX(r.last_execution_time), + sample_sql_handle = MAX(CASE WHEN r.n = 1 THEN r.sql_handle END), + sample_plan_handle = MAX(CASE WHEN r.n = 1 THEN r.plan_handle END) + FROM + ( + SELECT + ts.database_id, + ts.object_id, + ts.plan_handle, + ts.sql_handle, + ts.execution_count, + ts.total_worker_time, + ts.total_elapsed_time, + ts.total_logical_reads, + ts.total_logical_writes, + ts.total_physical_reads, + ts.cached_time, + ts.last_execution_time, + n = + ROW_NUMBER() OVER + ( + PARTITION BY + ts.database_id, + ts.object_id + ORDER BY + ts.execution_count DESC + ) + FROM sys.dm_exec_trigger_stats AS ts + WHERE ts.execution_count >= @minimum_execution_count + AND ts.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END + AND ts.database_id < 32761 + AND ts.database_id = ISNULL(@database_id, ts.database_id) + AND ts.cached_time >= ISNULL(@start_date, ts.cached_time) + AND ts.cached_time < ISNULL(@end_date, DATEADD(DAY, 1, ts.cached_time)) + ) AS r GROUP BY - ts.database_id, - ts.object_id + r.database_id, + r.object_id HAVING - SUM(ts.execution_count) >= @minimum_execution_count + SUM(r.execution_count) >= @minimum_execution_count OPTION(RECOMPILE, MAXDOP 1); IF @debug = 1 From 0b68dd54a72ea978087c24c9d911f68e0937a59e Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 20:35:16 -0400 Subject: [PATCH 31/52] Document @timestamp_column UTC convention in sp_HumanEventsBlockViewer 
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

@start_date and @end_date are shifted from local to UTC earlier in the
proc so they line up with the XML @timestamp attribute, which SQL Server
always emits in UTC. The same UTC-shifted values are then used for the
optional @timestamp_column filter when that parameter is supplied, which
means the caller's column must itself hold UTC timestamps — a column
storing local time will be filtered against the wrong window by the
local-vs-UTC offset.

There's no code fix that satisfies both "user stored UTC" and "user
stored local" callers without an additional parameter, so instead:

- Updated the parameter help (description + valid_inputs) to state the
  UTC requirement explicitly.
- Added a block comment above the dynamic SQL that applies the filter,
  so anyone tracing the code sees the convention.
- Updated the inline comment on the parameter declaration to point
  readers at @help = 1 for the full explanation.

No behavior change. Existing installs that already stored UTC (matching
the XML convention) continue to work; installs that stored local are the
silently-broken set that this doc change is meant to alert.
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEventsBlockViewer.sql | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql index 03717dec..1bde4f7c 100644 --- a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql +++ b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql @@ -72,7 +72,7 @@ ALTER PROCEDURE @target_schema sysname = NULL, /*schema of the table*/ @target_table sysname = NULL, /*table name*/ @target_column sysname = NULL, /*column containing XML data*/ - @timestamp_column sysname = NULL, /*column containing timestamp (optional)*/ + @timestamp_column sysname = NULL, /*column containing UTC timestamp (optional); see @help = 1 for details*/ @log_to_table bit = 0, /*enable logging to permanent tables*/ @log_database_name sysname = NULL, /*database to store logging tables*/ @log_schema_name sysname = NULL, /*schema to store logging tables*/ @@ -126,7 +126,7 @@ BEGIN WHEN N'@target_schema' THEN 'schema of the table containing blocked process report data' WHEN N'@target_table' THEN 'table containing blocked process report data' WHEN N'@target_column' THEN 'column containing blocked process report XML' - WHEN N'@timestamp_column' THEN 'column containing timestamp for filtering (optional)' + WHEN N'@timestamp_column' THEN 'column containing UTC timestamp for filtering (optional). MUST be stored in UTC — @start_date and @end_date are shifted to UTC internally to match the XML @timestamp attribute, and the same UTC-shifted values are used for this column filter. A column in local time will be filtered against the wrong window.' 
WHEN N'@log_to_table' THEN N'enable logging to permanent tables instead of returning results' WHEN N'@log_database_name' THEN N'database to store logging tables' WHEN N'@log_schema_name' THEN N'schema to store logging tables' @@ -150,7 +150,7 @@ BEGIN WHEN N'@target_schema' THEN 'a schema in the target database' WHEN N'@target_table' THEN 'a table in the target schema' WHEN N'@target_column' THEN 'an XML column containing blocked process report data' - WHEN N'@timestamp_column' THEN 'a datetime column for filtering by date range' + WHEN N'@timestamp_column' THEN 'a datetime / datetime2 / datetimeoffset column storing UTC timestamps' WHEN N'@log_to_table' THEN N'0 or 1' WHEN N'@log_database_name' THEN N'any valid database name' WHEN N'@log_schema_name' THEN N'any valid schema name' @@ -1852,7 +1852,16 @@ BEGIN N'.nodes(''/event'') AS e(x) WHERE e.x.exist(''@name[ .= "blocked_process_report"]'') = 1'; - /* Add timestamp filtering if specified*/ + /* + Add timestamp filtering if specified. + + NOTE: @start_date and @end_date are shifted from local to UTC earlier + in the proc so they line up with the XML @timestamp attribute (which + is UTC). The @timestamp_column value is passed through as-is, so the + caller's column MUST already contain UTC timestamps — if it holds + local time, rows will be filtered against the wrong window by the + local-vs-UTC offset. See the parameter help text. 
+ */ IF @timestamp_column IS NOT NULL BEGIN SET @extract_sql = @extract_sql + N' From 52fc3bfd26591ae45e51d71c0ed2945b0709ac35 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 20:37:59 -0400 Subject: [PATCH 32/52] Flag the @regression_where_clause REPLACE fragility in sp_QuickieStore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The regression-mode setup rewrites @where_clause into @regression_where_clause by textually REPLACEing '@start_date' and '@end_date' with their baseline-window counterparts. Works today because the only @where_clause += site that introduces those tokens is the date-range filter that actually wants to be rewritten. Any future filter added with those parameter names for a different purpose would be silently corrupted by the REPLACE. Not a current bug — a maintenance hazard that is not obvious from reading the REPLACE in isolation. Added a block comment at the REPLACE explaining the constraint and outlining the two ways out if the situation ever changes (don't reuse those parameter names in @where_clause, or switch to a sentinel-token builder). No behavior change. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_QuickieStore/sp_QuickieStore.sql | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/sp_QuickieStore/sp_QuickieStore.sql b/sp_QuickieStore/sp_QuickieStore.sql index b6e0627d..7b1d64c0 100644 --- a/sp_QuickieStore/sp_QuickieStore.sql +++ b/sp_QuickieStore/sp_QuickieStore.sql @@ -8412,6 +8412,29 @@ to use @regression_where_clause. IF @regression_mode = 1 BEGIN +/* +Fragility note for future maintainers: + +This block rebuilds @where_clause into @regression_where_clause by +textually replacing the tokens '@start_date' and '@end_date' with their +regression-baseline counterparts. 
It works today because the ONLY site +that introduces those tokens into @where_clause is the date-range +filter added further up (look for + "qsrs.last_execution_time >= @start_date + AND qsrs.last_execution_time < @end_date") +and that's exactly the fragment we want rewritten for the baseline +window. + +If a new filter is ever added that references @start_date or @end_date +for a DIFFERENT purpose (e.g. a statistical lookback window that should +NOT move with the regression baseline), this string REPLACE will +silently corrupt it. Either: + - don't use @start_date / @end_date as parameter names in any other + @where_clause += fragment, or + - switch to a sentinel-token approach (e.g. build with '{{start}}' + / '{{end}}' and REPLACE to the appropriate parameter name per + window) so the regression rewrite is explicit. +*/ SELECT @regression_where_clause = REPLACE From 82e08d9c7373e3a5f1ef0d353bbb4834fdb485cd Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 20:42:49 -0400 Subject: [PATCH 33/52] Remove SUBSTRING(..., 0, 8000) wrapper from sp_HumanEvents wait-type filter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The @wait_type_filter builder wrapped the FOR XML PATH output in SUBSTRING(, 0, 8000), then appended a closing paren. Two minor issues: - SUBSTRING(x, 0, 8000) returns 7,999 chars, not 8,000 — the 0-start offset eats one position. Classic off-by-one. - @wait_type_filter is nvarchar(max); the cap is arbitrary. If the predicate ever exceeded 7,999 chars, the trailing ')' would be appended to a mid-expression truncation and produce invalid XE session filter syntax. Realistically this rarely bites: the filter uses numeric wait_type IDs (nvarchar(11) max per group) and contiguous-ID grouping compresses the output further, so even @wait_type = 'all' stays well under the 7,999-char ceiling in practice. 
Still worth cleaning up — removed the SUBSTRING wrapper so the full nvarchar(max) FOR XML concatenation flows through to the session filter, no arbitrary cap. Smoke-tested @event_type = 'waits' with @wait_type = 'all' (the longest-filter path) against SQL Server 2022 — no errors. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEvents.sql | 96 +++++++++++++++++-------------- 1 file changed, 52 insertions(+), 44 deletions(-) diff --git a/sp_HumanEvents/sp_HumanEvents.sql b/sp_HumanEvents/sp_HumanEvents.sql index ea31dd02..5ce07af0 100644 --- a/sp_HumanEvents/sp_HumanEvents.sql +++ b/sp_HumanEvents/sp_HumanEvents.sql @@ -1478,54 +1478,62 @@ BEGIN GROUP BY maps.rn ) + /* + Build the wait_type filter as the full nvarchar(max) FOR XML + concatenation. Previously wrapped in SUBSTRING(..., 0, 8000), which + has two problems: + - SUBSTRING(x, 0, N) returns N-1 chars (the 0-start offset eats + one position). The cap was actually 7,999 chars, not 8,000. + - @wait_type_filter is nvarchar(max); capping at ~8k bytes is + arbitrary. With @wait_type = 'all' the predicate can grow past + that and the trailing closing paren appended below was being + tacked onto a mid-expression truncation — producing an invalid + XE session filter. + No cap needed; let the full predicate through. 
+ */ SELECT @wait_type_filter += - SUBSTRING ( - ( - SELECT - N' AND ((' + - STUFF + SELECT + N' AND ((' + + STUFF + ( ( - ( - SELECT - N' OR ' + - CASE - WHEN grps.minkey < grps.maxkey - THEN + - N'(wait_type >= ' + - CONVERT - ( - nvarchar(11), - grps.minkey - ) + - N' AND wait_type <= ' + - CONVERT - ( - nvarchar(11), - grps.maxkey - ) + - N')' + - @nc10 - ELSE N'(wait_type = ' + - CONVERT - ( - nvarchar(11), - grps.minkey - ) + - N')' + - @nc10 - END - FROM grps FOR XML PATH(N''), TYPE - ).value('./text()[1]', 'nvarchar(max)') - , - 1, - 13, - N'' - ) - ), - 0, - 8000 + SELECT + N' OR ' + + CASE + WHEN grps.minkey < grps.maxkey + THEN + + N'(wait_type >= ' + + CONVERT + ( + nvarchar(11), + grps.minkey + ) + + N' AND wait_type <= ' + + CONVERT + ( + nvarchar(11), + grps.maxkey + ) + + N')' + + @nc10 + ELSE N'(wait_type = ' + + CONVERT + ( + nvarchar(11), + grps.minkey + ) + + N')' + + @nc10 + END + FROM grps FOR XML PATH(N''), TYPE + ).value('./text()[1]', 'nvarchar(max)') + , + 1, + 13, + N'' + ) ) + N')'; END; From 602d09c04cf795f19e2fedf3a83b567d6fb3964c Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 20:47:25 -0400 Subject: [PATCH 34/52] Fix SUBSTRING length-vs-endpos in debug @sql chunk-print across 3 sprocs The @debug = 1 paths that PRINT long @sql values tile the output in 4000-char chunks to stay under PRINT's truncation limit. 
Several sites across sprocs had the chunks wrong in two ways: sp_QuickieStore line ~12310: PRINT SUBSTRING(@sql, 0, 4000); -- returns 3999 chars (0-start) PRINT SUBSTRING(@sql, 4001, 8000); -- 8000 chars from 4001 -> 4001..12000 PRINT SUBSTRING(@sql, 8001, 12000); -- 12000 chars from 8001 -> 8001..20000 PRINT SUBSTRING(@sql, 12001, 16000); -- 16000 chars from 12001 -> 12001..28000 sp_IndexCleanup (two sites): PRINT SUBSTRING(@sql, 1, 4000); PRINT SUBSTRING(@sql, 4000, 8000); -- 8000 chars from 4000 -> overlaps sp_PerfCheck line ~4652: PRINT SUBSTRING(@sql, 1, 4000); PRINT SUBSTRING(@sql, 4001, 8000); -- 8000 chars from 4001 -> overshoots SUBSTRING's third argument is length, not end-position. The authors appeared to treat it as an end-position, which produced overlapping (and in the QuickieStore case, massively overlapping) chunks. Normalized every site to: PRINT SUBSTRING(@sql, 1, 4000); PRINT SUBSTRING(@sql, 4001, 4000); -- and 8001, 4000 / 12001, 4000 as needed Debug-only output, no runtime behavior change. Made the QuickieStore version a block comment explaining the fix for future eyes. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_IndexCleanup/sp_IndexCleanup.sql | 8 ++++---- sp_PerfCheck/sp_PerfCheck.sql | 4 ++-- sp_QuickieStore/sp_QuickieStore.sql | 18 ++++++++++++++---- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/sp_IndexCleanup/sp_IndexCleanup.sql b/sp_IndexCleanup/sp_IndexCleanup.sql index 8b005a59..b6d3e05a 100644 --- a/sp_IndexCleanup/sp_IndexCleanup.sql +++ b/sp_IndexCleanup/sp_IndexCleanup.sql @@ -2349,8 +2349,8 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. IF @debug = 1 BEGIN - PRINT SUBSTRING(@sql, 1, 4000); - PRINT SUBSTRING(@sql, 4000, 8000); + PRINT SUBSTRING(@sql, 1, 4000); + PRINT SUBSTRING(@sql, 4001, 4000); END; INSERT INTO @@ -2565,8 +2565,8 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
IF @debug = 1 BEGIN - PRINT SUBSTRING(@sql, 1, 4000); - PRINT SUBSTRING(@sql, 4000, 8000); + PRINT SUBSTRING(@sql, 1, 4000); + PRINT SUBSTRING(@sql, 4001, 4000); END; INSERT INTO diff --git a/sp_PerfCheck/sp_PerfCheck.sql b/sp_PerfCheck/sp_PerfCheck.sql index 5d21c904..c17c102b 100644 --- a/sp_PerfCheck/sp_PerfCheck.sql +++ b/sp_PerfCheck/sp_PerfCheck.sql @@ -4649,8 +4649,8 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. PRINT @current_database_id; PRINT @current_database_name; PRINT REPLICATE('=', 64); - PRINT SUBSTRING(@sql, 1, 4000); - PRINT SUBSTRING(@sql, 4001, 8000); + PRINT SUBSTRING(@sql, 1, 4000); + PRINT SUBSTRING(@sql, 4001, 4000); END; EXECUTE sys.sp_executesql diff --git a/sp_QuickieStore/sp_QuickieStore.sql b/sp_QuickieStore/sp_QuickieStore.sql index 7b1d64c0..aeef18d9 100644 --- a/sp_QuickieStore/sp_QuickieStore.sql +++ b/sp_QuickieStore/sp_QuickieStore.sql @@ -12306,11 +12306,21 @@ OPTION(RECOMPILE);' + @nc10 IF @debug = 1 BEGIN + /* + PRINT truncates at 4000 chars for nvarchar/8000 for varchar, so + long @sql needs to be chunked. SUBSTRING's third argument is + length, not end-position — the previous calls had 4001/8000, + 8001/12000, 12001/16000 which tiled *lengths* against *starts* + and produced massively overlapping windows (each chunk dumped + 8k/12k/16k chars from its start, not the intended 4k). The + first chunk also started at 0, which SUBSTRING treats as "one + before position 1" — only 3,999 chars came out. Fixed both. 
+ */ PRINT LEN(@sql); - PRINT SUBSTRING(@sql, 0, 4000); - PRINT SUBSTRING(@sql, 4001, 8000); - PRINT SUBSTRING(@sql, 8001, 12000); - PRINT SUBSTRING(@sql, 12001, 16000); + PRINT SUBSTRING(@sql, 1, 4000); + PRINT SUBSTRING(@sql, 4001, 4000); + PRINT SUBSTRING(@sql, 8001, 4000); + PRINT SUBSTRING(@sql, 12001, 4000); END; EXECUTE sys.sp_executesql From f1142ceee50dc2d2b8e7a7fd54c26b4cb91c6cfb Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 21:13:43 -0400 Subject: [PATCH 35/52] Compute sp_PressureDetector sample-mode percent_signal_waits from raw delta MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The sample-mode JOIN used to set percent_signal_waits = (w2.percent_signal_waits + w.percent_signal_waits) / 2 which averages two CUMULATIVE signal-wait percentages captured at the start and end of the sample window. On a long-running server the two percentages are both close to the lifetime average, so the user got approximately "the lifetime signal-wait percentage" instead of the window-local value they asked for by setting @sample_seconds. Changed the computation to the direct window delta: 100 * (w2.signal_wait_time_ms - w.signal_wait_time_ms) / NULLIF(w2.wait_time_ms - w.wait_time_ms, 0) This required adding raw signal_wait_time_ms and wait_time_ms columns to the @waits table variable and populating them at INSERT time. The table-variable footprint grows by two bigints per row; trivial. Deliberately NOT clamped to 100. sys.dm_os_wait_stats can briefly report signal_wait > wait in short sample windows due to counter update timing, and we show the raw value so an operator sees "this sample window is too short or noisy for this metric to be meaningful" instead of a confident-looking 100% that hides the jitter. 
Verified @what_to_check = 'waits' with @sample_seconds = 3 on SQL Server 2022 — values now in the expected 0..100 range for well-behaved wait types, non-sample mode unchanged. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_PressureDetector/sp_PressureDetector.sql | 37 ++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/sp_PressureDetector/sp_PressureDetector.sql b/sp_PressureDetector/sp_PressureDetector.sql index 143ff613..b7851ccc 100644 --- a/sp_PressureDetector/sp_PressureDetector.sql +++ b/sp_PressureDetector/sp_PressureDetector.sql @@ -1028,6 +1028,11 @@ OPTION(MAXDOP 1, RECOMPILE);', hours_wait_time decimal(38,2), avg_ms_per_wait decimal(38,2), percent_signal_waits decimal(38,2), + /* Raw ms values so the sample-mode JOIN can compute + window-local percent_signal_waits as a proper delta ratio + rather than averaging the two cumulative snapshot ratios. */ + signal_wait_time_ms bigint, + wait_time_ms bigint, waiting_tasks_count_n bigint, sample_time datetime, sorting bigint, @@ -1173,6 +1178,8 @@ OPTION(MAXDOP 1, RECOMPILE);', hours_wait_time, avg_ms_per_wait, percent_signal_waits, + signal_wait_time_ms, + wait_time_ms, waiting_tasks_count_n, sample_time, sorting @@ -1325,6 +1332,8 @@ OPTION(MAXDOP 1, RECOMPILE);', 0. ) ), + dows.signal_wait_time_ms, + dows.wait_time_ms, dows.waiting_tasks_count, sample_time = SYSDATETIME(), @@ -1469,11 +1478,37 @@ OPTION(MAXDOP 1, RECOMPILE);', 0. ) ), + /* + Window-local percent_signal_waits = 100 * signal_delta / total_delta. + Previously this averaged the two snapshots' CUMULATIVE + percentages, which for a long-running server + approximates the lifetime signal-wait percentage — + not what the user asked for by setting @sample_seconds. + Stored raw *_wait_time_ms columns on @waits so we can + compute the correct ratio on the delta window. + + Deliberately NOT clamped to 100. 
sys.dm_os_wait_stats + can briefly report signal_wait > wait in short sample + windows due to counter update timing, so the raw value + can exceed 100%. Showing the raw value lets the operator + see that their window is too short / noisy for this + metric to be meaningful; hiding it behind a cap would + make a DMV jitter read like a confident 100%. + */ percent_signal_waits = CONVERT ( decimal(38,1), - (w2.percent_signal_waits + w.percent_signal_waits) / 2 + ISNULL + ( + 100.0 * (w2.signal_wait_time_ms - w.signal_wait_time_ms) / + NULLIF + ( + 1.0 * (w2.wait_time_ms - w.wait_time_ms), + 0. + ), + 0. + ) ), waiting_tasks_count = FORMAT((w2.waiting_tasks_count_n - w.waiting_tasks_count_n), 'N0'), From d010b26fc116bc96e5773050ce47483a74d10527 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 21:15:11 -0400 Subject: [PATCH 36/52] Use CONVERT instead of RTRIM for int->nvarchar in sp_HumanEventsBlockViewer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The "Unresolved:" fallback message concatenates b.object_id (int, parsed from blocked_process_report XML) into a string via RTRIM(b.object_id). RTRIM requires a string operand, so the int relies on implicit conversion first — a trailing-space-strip pattern that is brittle under uncommon ANSI/session settings. Replaced with an explicit CONVERT(nvarchar(20), b.object_id). nvarchar(20) is comfortably larger than the 11-char max (10 digits plus sign) an int can produce. ISNULL stays wrapping the result so a NULL object_id still renders as N'unknown'. No behavior change for normal runs; just removes the implicit-conversion dependency. Verified sp installs clean. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEventsBlockViewer.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql index 1bde4f7c..05568e01 100644 --- a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql +++ b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql @@ -2454,7 +2454,7 @@ SET N'database: ' + ISNULL(b.database_name, N'unknown') + N' object_id: ' + - ISNULL(RTRIM(b.object_id), N'unknown') + ISNULL(CONVERT(nvarchar(20), b.object_id), N'unknown') ) FROM #blocks AS b CROSS APPLY From b1346425fb88448911bb1803d8f980d7a5e925ed Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 21:29:26 -0400 Subject: [PATCH 37/52] Normalize database_name / currentdbname columns to sysname in sp_HumanEventsBlockViewer Several sites declared database identifier columns inconsistently: - currentdbname nvarchar(256) on one table and in one shred - currentdbname nvarchar(128) in three other shreds - database_name nvarchar(128) in the shred table, nvarchar(256) on #block_findings UNION ALL reconciled the inconsistency to nvarchar(256) at the #blocks INTO step, so there was no functional breakage, but the declarations looked like they disagreed about whether database identifiers need extra room. SQL Server caps DB names at 128 Unicode characters (sysname), so 128 is already sufficient everywhere. Normalized all five .value() shreds and both table columns to sysname. Also changed the sibling database_name column on the shred table for the same reason. Verified sproc installs clean on SQL Server 2022. sysname is accepted by xml .value() as the target type (tested with a small XML fragment) so no functional change. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEventsBlockViewer.sql | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql index 05568e01..05219829 100644 --- a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql +++ b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql @@ -775,8 +775,8 @@ BEGIN collection_time datetime2(7) NOT NULL DEFAULT SYSDATETIME(), blocked_process_report varchar(22) NOT NULL, event_time datetime2(7) NULL, - database_name nvarchar(128) NULL, - currentdbname nvarchar(256) NULL, + database_name sysname NULL, + currentdbname sysname NULL, contentious_object nvarchar(4000) NULL, activity varchar(8) NULL, blocking_tree varchar(8000) NULL, @@ -873,7 +873,7 @@ CREATE TABLE ( id integer IDENTITY PRIMARY KEY CLUSTERED, check_id integer NOT NULL, - database_name nvarchar(256) NULL, + database_name sysname NULL, object_name nvarchar(1000) NULL, finding_group nvarchar(100) NULL, finding nvarchar(4000) NULL, @@ -1312,7 +1312,7 @@ BEGIN SELECT bx.event_time, - currentdbname = bd.value('(process/@currentdbname)[1]', 'nvarchar(128)'), + currentdbname = bd.value('(process/@currentdbname)[1]', 'sysname'), spid = bd.value('(process/@spid)[1]', 'integer'), ecid = bd.value('(process/@ecid)[1]', 'integer'), query_text_pre = bd.value('(process/inputbuf/text())[1]', 'nvarchar(max)'), @@ -1369,7 +1369,7 @@ BEGIN /*Blocking queries*/ SELECT bx.event_time, - currentdbname = bg.value('(process/@currentdbname)[1]', 'nvarchar(128)'), + currentdbname = bg.value('(process/@currentdbname)[1]', 'sysname'), spid = bg.value('(process/@spid)[1]', 'integer'), ecid = bg.value('(process/@ecid)[1]', 'integer'), query_text_pre = bg.value('(process/inputbuf/text())[1]', 'nvarchar(max)'), @@ -1951,7 +1951,7 @@ SELECT log_used = bd.value('(process/@logused)[1]', 'bigint'), clientoption1 = bd.value('(process/@clientoption1)[1]', 'bigint'), clientoption2 = 
bd.value('(process/@clientoption2)[1]', 'bigint'), - currentdbname = bd.value('(process/@currentdbname)[1]', 'nvarchar(256)'), + currentdbname = bd.value('(process/@currentdbname)[1]', 'sysname'), currentdbid = bd.value('(process/@currentdb)[1]', 'integer'), blocking_level = 0, sort_order = CONVERT(varchar(400), ''), @@ -2071,7 +2071,7 @@ SELECT log_used = bg.value('(process/@logused)[1]', 'bigint'), clientoption1 = bg.value('(process/@clientoption1)[1]', 'bigint'), clientoption2 = bg.value('(process/@clientoption2)[1]', 'bigint'), - currentdbname = bg.value('(process/@currentdbname)[1]', 'nvarchar(128)'), + currentdbname = bg.value('(process/@currentdbname)[1]', 'sysname'), currentdbid = bg.value('(process/@currentdb)[1]', 'integer'), blocking_level = 0, sort_order = CONVERT(varchar(400), ''), From d49d858549ff05bd217380cca89e4b3a077778ac Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 21:33:16 -0400 Subject: [PATCH 38/52] Honor @gimme_danger in sp_HumanEvents table-logging wait insert The live wait-stats parser gates zero-duration wait events on @gimme_danger so the caller can opt into capturing them (see line ~2704). The @keep_alive = 1 table-logging path built its own dynamic INSERT from the XML cache but hardcoded 'duration > 0' in the WHERE, so the same @gimme_danger = 1 call that captured zero-duration waits in live-output mode silently dropped them when logging to a table. Added the same (... OR @gimme_danger = 1) guard in the dynamic WHERE and plumbed @gimme_danger through sp_executesql alongside @date_filter. Verified sproc installs clean. Scope confirmed: @gimme_danger's documented purpose is specifically "zero-duration wait events"; blocking / queries / compiles / recompiles have their own @*_duration_ms parameter-driven thresholds and aren't in scope. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEvents.sql | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/sp_HumanEvents/sp_HumanEvents.sql b/sp_HumanEvents/sp_HumanEvents.sql index 5ce07af0..bae3b343 100644 --- a/sp_HumanEvents/sp_HumanEvents.sql +++ b/sp_HumanEvents/sp_HumanEvents.sql @@ -4212,7 +4212,11 @@ END plan_handle = c.value(''xs:hexBinary((action[@name="plan_handle"]/value/text())[1])'', ''varbinary(64)'') FROM #human_events_xml_internal AS xet OUTER APPLY xet.human_events_xml.nodes(''//event'') AS oa(c) -WHERE c.exist(''(data[@name="duration"]/value/text()[. > 0])'') = 1 +/* Match the live parser''s @gimme_danger semantic — without it, the + table-logging path silently dropped zero-duration waits even when + the user explicitly opted into capturing them via @gimme_danger = 1. */ +WHERE (c.exist(''(data[@name="duration"]/value/text()[. > 0])'') = 1 + OR @gimme_danger = 1) AND c.exist(''@timestamp[. > sql:variable("@date_filter")]'') = 1;') ) WHEN @event_type_check LIKE N'%lock%' /*Blocking!*/ @@ -4727,8 +4731,9 @@ ORDER BY /* this executes the insert */ EXECUTE sys.sp_executesql @table_sql, - N'@date_filter datetime2(7)', - @date_filter; + N'@date_filter datetime2(7), @gimme_danger bit', + @date_filter, + @gimme_danger; /*Update the worker table's last checked, and conditionally, updated dates*/ UPDATE From 48a8902f90eb5100bf3d638f789f0288c2a86220 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 21:35:48 -0400 Subject: [PATCH 39/52] Fix remaining SUBSTRING length-vs-endpos chunk-print bugs across sprocs Missed instances of the same debug PRINT chunk-tiling bug I already fixed in sp_QuickieStore / sp_IndexCleanup / sp_PerfCheck: sp_HumanEvents: - @view_sql block (line ~4065): 10 chunks, all (start, length-as-end) - @table_sql block (line ~4719): 10 chunks, same pattern (first chunk also had start=0 off-by-one) sp_PressureDetector: - 
@disk_check (line ~1777) - @mem_sql (line ~3302) - @cpu_sql (line ~4018) SUBSTRING's third argument is length, not end-position, so chunks like (4001, 8000) / (8001, 12000) took 8000 / 12000 chars from each start and produced massively overlapping windows. Normalized all sites to tile 4000-char chunks starting at 1, 4001, 8001, ... Debug-only output, no runtime behavior change. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEvents.sql | 49 ++++++++++++--------- sp_PressureDetector/sp_PressureDetector.sql | 12 ++--- 2 files changed, 35 insertions(+), 26 deletions(-) diff --git a/sp_HumanEvents/sp_HumanEvents.sql b/sp_HumanEvents/sp_HumanEvents.sql index bae3b343..6be7758d 100644 --- a/sp_HumanEvents/sp_HumanEvents.sql +++ b/sp_HumanEvents/sp_HumanEvents.sql @@ -4062,16 +4062,18 @@ BEGIN IF @debug = 1 BEGIN - PRINT SUBSTRING(@view_sql, 0, 4000); - PRINT SUBSTRING(@view_sql, 4001, 8000); - PRINT SUBSTRING(@view_sql, 8001, 12000); - PRINT SUBSTRING(@view_sql, 12001, 16000); - PRINT SUBSTRING(@view_sql, 16001, 20000); - PRINT SUBSTRING(@view_sql, 20001, 24000); - PRINT SUBSTRING(@view_sql, 24001, 28000); - PRINT SUBSTRING(@view_sql, 28001, 32000); - PRINT SUBSTRING(@view_sql, 32001, 36000); - PRINT SUBSTRING(@view_sql, 36001, 40000); + /* SUBSTRING third arg is length, not end-position. See + the @table_sql block below for the same fix. 
*/ + PRINT SUBSTRING(@view_sql, 1, 4000); + PRINT SUBSTRING(@view_sql, 4001, 4000); + PRINT SUBSTRING(@view_sql, 8001, 4000); + PRINT SUBSTRING(@view_sql, 12001, 4000); + PRINT SUBSTRING(@view_sql, 16001, 4000); + PRINT SUBSTRING(@view_sql, 20001, 4000); + PRINT SUBSTRING(@view_sql, 24001, 4000); + PRINT SUBSTRING(@view_sql, 28001, 4000); + PRINT SUBSTRING(@view_sql, 32001, 4000); + PRINT SUBSTRING(@view_sql, 36001, 4000); END; IF @debug = 1 BEGIN RAISERROR(N'creating view %s', 0, 1, @event_type_check) WITH NOWAIT; END; @@ -4716,16 +4718,23 @@ ORDER BY IF @debug = 1 BEGIN - PRINT SUBSTRING(@table_sql, 0, 4000); - PRINT SUBSTRING(@table_sql, 4001, 8000); - PRINT SUBSTRING(@table_sql, 8001, 12000); - PRINT SUBSTRING(@table_sql, 12001, 16000); - PRINT SUBSTRING(@table_sql, 16001, 20000); - PRINT SUBSTRING(@table_sql, 20001, 24000); - PRINT SUBSTRING(@table_sql, 24001, 28000); - PRINT SUBSTRING(@table_sql, 28001, 32000); - PRINT SUBSTRING(@table_sql, 32001, 36000); - PRINT SUBSTRING(@table_sql, 36001, 40000); + /* SUBSTRING third arg is length, not end-position. + Previous values (4001, 8000), (8001, 12000), etc. took + 8000 / 12000 / 16000 chars starting at each offset, so + chunks massively overlapped instead of tiling. First + call with start=0 also returned 3,999 chars (0-start + eats one position). Normalized to 4000-char tiles + starting at 1, 4001, 8001, ... 
*/ + PRINT SUBSTRING(@table_sql, 1, 4000); + PRINT SUBSTRING(@table_sql, 4001, 4000); + PRINT SUBSTRING(@table_sql, 8001, 4000); + PRINT SUBSTRING(@table_sql, 12001, 4000); + PRINT SUBSTRING(@table_sql, 16001, 4000); + PRINT SUBSTRING(@table_sql, 20001, 4000); + PRINT SUBSTRING(@table_sql, 24001, 4000); + PRINT SUBSTRING(@table_sql, 28001, 4000); + PRINT SUBSTRING(@table_sql, 32001, 4000); + PRINT SUBSTRING(@table_sql, 36001, 4000); END; /* this executes the insert */ diff --git a/sp_PressureDetector/sp_PressureDetector.sql b/sp_PressureDetector/sp_PressureDetector.sql index b7851ccc..42410274 100644 --- a/sp_PressureDetector/sp_PressureDetector.sql +++ b/sp_PressureDetector/sp_PressureDetector.sql @@ -1774,8 +1774,8 @@ OPTION(MAXDOP 1, RECOMPILE);', IF @debug = 1 BEGIN - PRINT SUBSTRING(@disk_check, 1, 4000); - PRINT SUBSTRING(@disk_check, 4001, 8000); + PRINT SUBSTRING(@disk_check, 1, 4000); + PRINT SUBSTRING(@disk_check, 4001, 4000); END; INSERT @@ -3299,8 +3299,8 @@ OPTION(MAXDOP 1, RECOMPILE);', IF @debug = 1 BEGIN - PRINT SUBSTRING(@mem_sql, 1, 4000); - PRINT SUBSTRING(@mem_sql, 4001, 8000); + PRINT SUBSTRING(@mem_sql, 1, 4000); + PRINT SUBSTRING(@mem_sql, 4001, 4000); END; IF @log_to_table = 0 @@ -4015,8 +4015,8 @@ OPTION(MAXDOP 1, RECOMPILE);', IF @debug = 1 BEGIN - PRINT SUBSTRING(@cpu_sql, 1, 4000); - PRINT SUBSTRING(@cpu_sql, 4001, 8000); + PRINT SUBSTRING(@cpu_sql, 1, 4000); + PRINT SUBSTRING(@cpu_sql, 4001, 4000); END; IF @log_to_table = 0 From 7bd5bfc76aa029862a51b2e07e8901da4e78c0d9 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 21:39:00 -0400 Subject: [PATCH 40/52] Fix inverted recent-modification guard in sp_HumanEvents view-recreation block MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The outer IF gate for the "create or alter views" block had: IF EXISTS (...views not yet created...) 
OR (SELECT o.modify_date FROM sys.all_objects WHERE name = 'sp_HumanEvents') < DATEADD(HOUR, -1, SYSDATETIME()) The comment right above said "If the proc has been modified, maybe views have been added or changed?" — but the `<` comparison means "modify_date is earlier than an hour ago," i.e., the proc has existed in its current form for MORE than an hour. Which is true for every install once the first hour passes and stays true forever. Effect: the outer block's guard evaluated true on every 5-second iteration of the collector loop. @view_tracker short-circuits the actual view-creation work after the first pass, but the scan of #human_events_worker and the sys.all_objects lookup ran every cycle regardless — unnecessary churn on a keep-alive instance. Flipped to `>` so the guard fires only when the proc was modified within the last hour (i.e., just after a real upgrade). Matches the commented intent and lets aged installs skip the block entirely after the initial view creation completes. Verified sproc installs clean. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEvents.sql | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/sp_HumanEvents/sp_HumanEvents.sql b/sp_HumanEvents/sp_HumanEvents.sql index 6be7758d..84af9b14 100644 --- a/sp_HumanEvents/sp_HumanEvents.sql +++ b/sp_HumanEvents/sp_HumanEvents.sql @@ -3885,13 +3885,19 @@ IF EXISTS AND hew.is_view_created = 0 ) OR -( /* If the proc has been modified, maybe views have been added or changed? */ +( /* If the proc has been modified, maybe views have been added or changed? + "Recently modified" means modify_date is AFTER (later than) an hour + ago — the original used < which is "more than an hour ago," i.e., + true for every install older than one hour, so the guard fired on + every 5-second loop iteration forever. @view_tracker short-circuits + the actual view-creation work but the scan of #human_events_worker + and sys.all_objects still ran every cycle. 
*/ SELECT o.modify_date FROM sys.all_objects AS o WHERE o.type = N'P' AND o.name = N'sp_HumanEvents' -) < DATEADD(HOUR, -1, SYSDATETIME()) +) > DATEADD(HOUR, -1, SYSDATETIME()) BEGIN IF @debug = 1 BEGIN RAISERROR(N'Found views to create, beginning!', 0, 1) WITH NOWAIT; END; IF From 3622aea959d392abf22af4cb9cabff1b56724b25 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 21:45:28 -0400 Subject: [PATCH 41/52] Document intentional event_file preference in sp_HumanEventsBlockViewer auto-detect MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The @target_type auto-detect uses SELECT TOP (1) ... ORDER BY t.target_name, which alphabetically picks 'event_file' over 'ring_buffer' when a session has both attached. That preference is deliberate — event files persist, the ring buffer is an in-memory window that drops older events under memory pressure, so a blocking report built from file-target data is more likely to cover the full window the caller asked for. The alphabetical ordering just happens to match that intent. Added a block comment explaining the choice so a future reviewer doesn't "fix" the ORDER BY thinking it's accidental. No behavior change. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEventsBlockViewer.sql | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql index 05219829..49ea78c3 100644 --- a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql +++ b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql @@ -937,6 +937,16 @@ IF @debug = 1 BEGIN RAISERROR('What kind of target does %s have?', 0, 1, @session_name) WITH NOWAIT; END; +/* +Auto-detect @target_type when not supplied. When a session has both +targets attached, ORDER BY t.target_name picks 'event_file' over +'ring_buffer' alphabetically — this is DELIBERATE. 
event_file is the +more reliable target (ring_buffer has a finite in-memory window and +drops older events under pressure), so a blocking report built from +the file target has a better chance of covering the full window the +caller asked for. Don't "fix" the ORDER BY to ring_buffer unless you +want faster but less complete reads. +*/ IF @target_type IS NULL AND @is_system_health = 0 BEGIN From 9165e76998c5a1aff35a3823596e2aca4f9f5b58 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 21:46:34 -0400 Subject: [PATCH 42/52] Case-insensitively compare @target_type = 'ring_buffer' in sp_HumanEventsBlockViewer The system_health branch had a lone case-sensitive comparison: IF @target_type = N'ring_buffer' which would evaluate false on a case-sensitive server collation (or if the caller passed N'Ring_Buffer' / N'RING_BUFFER') and silently fall through to the event_file ELSE branch. Every other @target_type check in the sproc (lines ~476, 512, 889, and the parameter-validation block) already uses LOWER(@target_type), so this one site was the only outlier. Changed to LOWER(@target_type) = N'ring_buffer' to match convention. No behavior change on default CI collations; fixes the silent fallthrough on CS collations and on case-variant inputs. Verified sproc installs clean. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HumanEvents/sp_HumanEventsBlockViewer.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql index 49ea78c3..eb90d291 100644 --- a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql +++ b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql @@ -1227,7 +1227,7 @@ BEGIN RAISERROR('Inserting to #sp_server_diagnostics_component_result for target type: %s and system health: %s', 0, 1, @target_type, @is_system_health_msg) WITH NOWAIT; END; - IF @target_type = N'ring_buffer' + IF LOWER(@target_type) = N'ring_buffer' BEGIN INSERT #sp_server_diagnostics_component_result From 1ca8808aebfb950f9d0abafef959d17d48efc5ba Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 21:48:46 -0400 Subject: [PATCH 43/52] Add configuration_id 17 (ISOLATE_SECURITY_POLICY_CARDINALITY) to sp_PerfCheck DSC check The database-scoped-configurations comparison had a CASE branch for ISOLATE_SECURITY_POLICY_CARDINALITY but the branch could never fire: - The default-row INSERT VALUES clause didn't include a row for configuration_id 17, so the expected-default reference was missing. - The WHERE sc.configuration_id IN (...) list skipped 17, so that row never made it into #database_scoped_configs in the first place. Added 17 to both lists (between 16 and 18 where it belongs numerically and alphabetically). Verified configuration_id 17 is the actual ID on SQL Server 2022. Behavior change: databases that have ISOLATE_SECURITY_POLICY_CARDINALITY set to the non-default value of 1 will now surface as non-default in the DSC check output, as the code always intended. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_PerfCheck/sp_PerfCheck.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sp_PerfCheck/sp_PerfCheck.sql b/sp_PerfCheck/sp_PerfCheck.sql index c17c102b..392782c4 100644 --- a/sp_PerfCheck/sp_PerfCheck.sql +++ b/sp_PerfCheck/sp_PerfCheck.sql @@ -4559,6 +4559,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. (@current_database_id, @current_database_name, 10, N''TSQL_SCALAR_UDF_INLINING'', NULL, NULL, 1), (@current_database_id, @current_database_name, 13, N''OPTIMIZE_FOR_AD_HOC_WORKLOADS'', NULL, NULL, 1), (@current_database_id, @current_database_name, 16, N''ROW_MODE_MEMORY_GRANT_FEEDBACK'', NULL, NULL, 1), + (@current_database_id, @current_database_name, 17, N''ISOLATE_SECURITY_POLICY_CARDINALITY'', NULL, NULL, 1), (@current_database_id, @current_database_name, 18, N''BATCH_MODE_ON_ROWSTORE'', NULL, NULL, 1), (@current_database_id, @current_database_name, 19, N''DEFERRED_COMPILATION_TV'', NULL, NULL, 1), (@current_database_id, @current_database_name, 20, N''ACCELERATED_PLAN_FORCING'', NULL, NULL, 1), @@ -4631,7 +4632,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. WHERE sc.configuration_id IN ( 1, 2, 3, 4, 7, 8, 9, - 10, 13, 16, 18, 19, 20, 24, + 10, 13, 16, 17, 18, 19, 20, 24, 27, 28, 31, 33, 34, 35, 37, 39, 40, 41, 42, 43 /* SQL Server 2025 options */ ); From 58817cd077b675d5892a054536ed02b4346877d9 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 21:53:46 -0400 Subject: [PATCH 44/52] Treat sp_QuickieStore as 2022-class when >=4 of 5 QS views exist The @sql_2022_views gate ran COUNT_BIG(*) = 5 FROM sys.all_objects WHERE name IN (plan_feedback, query_hints, query_variant, replicas, plan_forcing_locations) so if any one of those five system catalog views was missing, every 2022-era feature (hints, feedback, variants, forcing-location logic) was silently disabled. 
In practice the only view in this set that can be absent on an otherwise-2022-class database is query_store_replicas, which is managed differently on standard Azure SQL Database tiers. The other four are what the sproc actually uses for hints/feedback/variants, and they exist on every relevant platform. Requiring all 5 caused those features to be disabled on Azure SQL DB for no good reason. Loosened the threshold to >= 4. Pre-2022 servers expose 0 or 1 of these views, not 4, so this doesn't false-positive older builds. Verified sproc still installs clean. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_QuickieStore/sp_QuickieStore.sql | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/sp_QuickieStore/sp_QuickieStore.sql b/sp_QuickieStore/sp_QuickieStore.sql index aeef18d9..e88c779c 100644 --- a/sp_QuickieStore/sp_QuickieStore.sql +++ b/sp_QuickieStore/sp_QuickieStore.sql @@ -4307,12 +4307,20 @@ END; /* See if our cool new 2022 views exist. -May have to tweak this if views aren't present in some cloudy situations. + +Threshold is >= 4 rather than = 5 because query_store_replicas is +the one view in this set that standard Azure SQL Database tiers can +be missing (replicas are managed differently there). The other four +are what the sproc actually uses for hints, feedback, and variants, +and those work fine on Azure SQL DB. Requiring all 5 would disable +every 2022-era feature on DBs that are legitimately 2022-class. +4 of 5 plus the rest being older builds is not a realistic shape — +pre-2022 servers have 0 or 1 of these views, not 4. 
*/ SELECT @sql_2022_views = CASE - WHEN COUNT_BIG(*) = 5 + WHEN COUNT_BIG(*) >= 4 THEN 1 ELSE 0 END From 5f7d7a9e1733efe156db393b88e2a3f4dac7a63c Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 21:58:40 -0400 Subject: [PATCH 45/52] Remove per-row @minimum_execution_count pre-filter in sp_QuickieCache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All four aggregation paths (Statement / Procedure / Function / Trigger) applied @minimum_execution_count twice: - Per-row: WHERE .execution_count >= @minimum_execution_count - Aggregate: HAVING SUM(execution_count) >= @minimum_execution_count The per-row filter excluded individual rows whose single-plan execution_count was below the threshold before the aggregation got to run. A query_hash / object that had many plans each with small execution counts — think recompile-heavy or hint-varying paths — had every row filtered out, so the HAVING SUM never got the chance to see them even though their group total comfortably cleared @minimum_execution_count. The parameter description ("noise floor for single-exec queries") and the HAVING shape both point at aggregate intent. Dropped the per-row predicate from all four paths; HAVING SUM is the sole enforcement point. Kept the Statement-path sys DB filter and the other WHERE predicates intact. Left a short "see Statement path" comment on each of the three non-Statement paths so the shared reasoning isn't hidden. Verified sproc installs clean and @top = 3 runs against SQL Server 2022 without errors. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_QuickieCache/sp_QuickieCache.sql | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/sp_QuickieCache/sp_QuickieCache.sql b/sp_QuickieCache/sp_QuickieCache.sql index e2b35b39..d224010a 100644 --- a/sp_QuickieCache/sp_QuickieCache.sql +++ b/sp_QuickieCache/sp_QuickieCache.sql @@ -1083,8 +1083,14 @@ CROSS APPLY FROM sys.dm_exec_plan_attributes(qs.plan_handle) AS pa WHERE pa.attribute = N''dbid'' ) AS pa -WHERE qs.query_hash <> 0x0000000000000000 -AND qs.execution_count >= @minimum_execution_count' + +WHERE qs.query_hash <> 0x0000000000000000' + + /* @minimum_execution_count is enforced ONLY in the HAVING + SUM(execution_count) below — applying it per-row here + filtered out individual plans whose single-plan execution_count + was below the floor but whose group total was above it + (think: a recompile-heavy query with many plans each run a + few times that add up to a lot). Same reasoning applies to + the procedure / function / trigger paths further down. */ CASE WHEN @ignore_system_databases = 1 THEN N' @@ -1271,8 +1277,9 @@ OPTION(RECOMPILE, MAXDOP 1);'; ps.execution_count DESC ) FROM sys.dm_exec_procedure_stats AS ps - WHERE ps.execution_count >= @minimum_execution_count - AND ps.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END + /* See Statement path comment re: why @minimum_execution_count + is HAVING-only rather than a per-row pre-filter. 
*/ + WHERE ps.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END AND ps.database_id < 32761 AND ps.database_id = ISNULL(@database_id, ps.database_id) AND ps.cached_time >= ISNULL(@start_date, ps.cached_time) @@ -1380,8 +1387,9 @@ FROM fs.execution_count DESC ) FROM sys.dm_exec_function_stats AS fs - WHERE fs.execution_count >= @minimum_execution_count - AND fs.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END + /* See Statement path comment re: why @minimum_execution_count + is HAVING-only rather than a per-row pre-filter. */ + WHERE fs.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END AND fs.database_id < 32761 AND fs.database_id = ISNULL(@database_id, fs.database_id) AND fs.cached_time >= ISNULL(@start_date, fs.cached_time) @@ -1486,8 +1494,9 @@ OPTION(RECOMPILE, MAXDOP 1);'; ts.execution_count DESC ) FROM sys.dm_exec_trigger_stats AS ts - WHERE ts.execution_count >= @minimum_execution_count - AND ts.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END + /* See Statement path comment re: why @minimum_execution_count + is HAVING-only rather than a per-row pre-filter. 
*/ + WHERE ts.database_id > CASE WHEN @ignore_system_databases = 1 THEN 4 ELSE 0 END AND ts.database_id < 32761 AND ts.database_id = ISNULL(@database_id, ts.database_id) AND ts.cached_time >= ISNULL(@start_date, ts.cached_time) From 4bbd3f2a02d5e870da6f54f630c19f45703fb775 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 22:00:15 -0400 Subject: [PATCH 46/52] Bump sp_PerfCheck check_id 1002 (max memory near physical RAM) to priority 20 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The finding for "Max Server Memory >= 95% of physical memory" carried priority 40 with the comment "/* High priority */" — the comment matched intent but the value corresponded to Low priority per the sproc's convention (20 = High, 30 = Medium, 40 = Low, as used by check 1001 above for the less-severe "min memory too close to max" case). This check flags an actual OS-starvation risk: if SQL Server reserves >= 95% of the box's physical memory, the OS and other processes are squeezed, which can cause paging, thrash, and RGS waits across the whole instance — well above a config recommendation in severity. Corrected the priority to 20 to match the comment and the real operator impact. Sibling check 1001 (min/max ratio) stays at 40 — still a legitimate config recommendation, not a runtime risk. Verified sproc installs clean. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_PerfCheck/sp_PerfCheck.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sp_PerfCheck/sp_PerfCheck.sql b/sp_PerfCheck/sp_PerfCheck.sql index 392782c4..dd1c4766 100644 --- a/sp_PerfCheck/sp_PerfCheck.sql +++ b/sp_PerfCheck/sp_PerfCheck.sql @@ -3517,7 +3517,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
VALUES ( 1002, - 40, /* High priority */ + 20, /* High priority — OS-starvation risk */ N'Server Configuration', N'Max Server Memory Too Close To Physical Memory', N'Max server memory (' + From c77222f9291bdd66f9e94a318fa2c4ed23a7a01d Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 22:03:45 -0400 Subject: [PATCH 47/52] Gate sp_PerfCheck dm_os_memory_health_history read on VIEW SERVER STATE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit @health_history_exists was computed from OBJECT_ID('sys.dm_os_memory_health_history') IS NOT NULL, which succeeds for any caller with VIEW DEFINITION on server metadata (effectively every login). Reading the DMV itself requires VIEW SERVER STATE — so a non-sysadmin caller without VSS would see the existence check pass, enter the IF block, and hit an unhandled permission error inside the sp_executesql. The sproc already computes @has_view_server_state earlier, so the fix is just AND @has_view_server_state = 1 on the gate. Left a short comment explaining why the extra clause matters since the existence check alone looks sufficient at a glance. Verified sproc installs clean. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_PerfCheck/sp_PerfCheck.sql | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sp_PerfCheck/sp_PerfCheck.sql b/sp_PerfCheck/sp_PerfCheck.sql index dd1c4766..6f2c4c45 100644 --- a/sp_PerfCheck/sp_PerfCheck.sql +++ b/sp_PerfCheck/sp_PerfCheck.sql @@ -2263,8 +2263,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. SET @stolen_memory_pct = (@stolen_memory_gb / (@buffer_pool_size_gb + @stolen_memory_gb)) * 100.0; - /* Query memory health history if available (SQL Server 2025+) */ + /* Query memory health history if available (SQL Server 2025+). 
+ OBJECT_ID existence-check only requires VIEW DEFINITION + metadata access; reading the DMV itself requires + VIEW SERVER STATE. Without gating on @has_view_server_state + a non-sysadmin caller would hit an unhandled permission + error from inside the sp_executesql. */ IF @health_history_exists = CONVERT(bit, 'true') + AND @has_view_server_state = 1 BEGIN EXECUTE sys.sp_executesql N' From c79ad737e5a26f3daf01afc38cf0ef2f9e2bb582 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 22:12:50 -0400 Subject: [PATCH 48/52] Tighten sp_PerfCheck stolen-memory counter filter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The stolen-memory lookup was: WHERE dopc.counter_name LIKE N'Stolen Server%' One counter matches today (Stolen Server Memory (KB) under SQLServer:Memory Manager) and a scalar SELECT expected exactly that. Works, but fragile — a future build that adds another Stolen-Server-prefixed counter would silently change which row's cntr_value the scalar assignment latched onto, and potentially pull from a different object_name than intended. Tightened to the same shape Erik uses for other counter lookups in this sproc: WHERE RTRIM(dopc.object_name) LIKE N'%Memory Manager%' AND RTRIM(dopc.counter_name) = N'Stolen Server Memory (KB)' The Memory Manager object_name LIKE covers both default instances (SQLServer:Memory Manager) and named instances (MSSQL$:Memory Manager). RTRIM handles the nchar(128) trailing-space padding that sys.dm_os_performance_counters emits. Verified sproc still reports stolen memory correctly on SQL Server 2022. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_PerfCheck/sp_PerfCheck.sql | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/sp_PerfCheck/sp_PerfCheck.sql b/sp_PerfCheck/sp_PerfCheck.sql index 6f2c4c45..f9dce225 100644 --- a/sp_PerfCheck/sp_PerfCheck.sql +++ b/sp_PerfCheck/sp_PerfCheck.sql @@ -2246,7 +2246,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. FROM sys.dm_os_memory_clerks AS domc WHERE domc.type = N'MEMORYCLERK_SQLBUFFERPOOL'; - /* Get stolen memory */ + /* Get stolen memory. + Anchored both object_name (LIKE %Memory Manager% to cover + both default and named-instance prefixes like + "SQLServer:Memory Manager" and "MSSQL$INST:Memory Manager") + and counter_name (exact match). Previous filter was a loose + LIKE N'Stolen Server%' that relied on the counter name being + globally unique; fine today but would silently drift if a + future build adds another prefix-matching counter. */ SELECT @stolen_memory_gb = CONVERT @@ -2255,7 +2262,8 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
dopc.cntr_value / 1024.0 / 1024.0 ) FROM sys.dm_os_performance_counters AS dopc - WHERE dopc.counter_name LIKE N'Stolen Server%'; + WHERE RTRIM(dopc.object_name) LIKE N'%Memory Manager%' + AND RTRIM(dopc.counter_name) = N'Stolen Server Memory (KB)'; /* Calculate stolen memory percentage */ IF @buffer_pool_size_gb > 0 From 1b9ba459e341b926499076ad1eac1aeac3bf836a Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 22:15:33 -0400 Subject: [PATCH 49/52] Split sp_PerfCheck pagelatch/uptime scalar assignment into two SELECTs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The "calculate pagelatch wait time" block assigned @pagelatch_wait_hours (a SUM aggregate over wait_stats) and @server_uptime_hours (a scalar DATEDIFF over sys_info) in a single SELECT by CROSS JOIN'ing the two DMVs and GROUP BY'ing on the same DATEDIFF expression. It worked only because sys_info is always a one-row view — so one group resolves one row and the scalar assignment lands. The shape is unusual enough that a future reader would either: - think the GROUP BY is load-bearing (it isn't, for correctness) - or add a row-selecting predicate and break the implicit one-row guarantee Split into two plain scalar SELECTs — one over sys_info for uptime, one over wait_stats for the pagelatch sum. Verified the produced values match (uptime 78.96h, pagelatch 9.84h on the test server) and sproc still runs clean. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_PerfCheck/sp_PerfCheck.sql | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/sp_PerfCheck/sp_PerfCheck.sql b/sp_PerfCheck/sp_PerfCheck.sql index f9dce225..0a18f970 100644 --- a/sp_PerfCheck/sp_PerfCheck.sql +++ b/sp_PerfCheck/sp_PerfCheck.sql @@ -2060,7 +2060,19 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
/* Check for stolen memory from buffer pool */ IF @has_view_server_state = 1 BEGIN - /* Calculate pagelatch wait time for TempDB contention check */ + /* Calculate pagelatch wait time for TempDB contention check. + Split into two scalar SELECTs — the previous version mixed an + aggregated value (@pagelatch_wait_hours) with a non-aggregated + one (@server_uptime_hours) in the same SELECT by joining + wait_stats to sys_info and GROUP BY'ing on the uptime + expression. It worked only because sys_info is always a + single-row view, and the GROUP BY on a scalar expression + reads oddly. */ + SELECT + @server_uptime_hours = + DATEDIFF(SECOND, osi.sqlserver_start_time, SYSDATETIME()) / 3600.0 + FROM sys.dm_os_sys_info AS osi; + SELECT @pagelatch_wait_hours = SUM @@ -2070,13 +2082,8 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. THEN osw.wait_time_ms / 1000.0 / 3600.0 ELSE 0 END - ), - @server_uptime_hours = - DATEDIFF(SECOND, osi.sqlserver_start_time, SYSDATETIME()) / 3600.0 - FROM sys.dm_os_wait_stats AS osw - CROSS JOIN sys.dm_os_sys_info AS osi - GROUP BY - DATEDIFF(SECOND, osi.sqlserver_start_time, SYSDATETIME()) / 3600.0; + ) + FROM sys.dm_os_wait_stats AS osw; SET @pagelatch_ratio_to_uptime = @pagelatch_wait_hours / NULLIF(@server_uptime_hours, 0) * 100; From 3d5bf7bde886269ad022703a3297eed3573ff59f Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 22:20:38 -0400 Subject: [PATCH 50/52] Honor @start_date / @end_date in sp_QuickieCache single-use-plans mode The @find_single_use_plans = 1 path filtered the plan cache to qs.execution_count = 1 and applied the other scoping predicates (@ignore_system_databases, @database_id, system-db sentinel) but silently ignored @start_date and @end_date. A user asking "which single-use plans were compiled in this time window" got everything regardless of window. 
Added the two filters against qs.creation_time (same column the displayed plan_age calculation already uses) in the shape used by the statement / procedure / function / trigger paths: AND (@start_date IS NULL OR qs.creation_time >= @start_date) AND (@end_date IS NULL OR qs.creation_time < @end_date) Verified sproc installs clean and @find_single_use_plans = 1 runs without errors on SQL Server 2022. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_QuickieCache/sp_QuickieCache.sql | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sp_QuickieCache/sp_QuickieCache.sql b/sp_QuickieCache/sp_QuickieCache.sql index d224010a..4979a271 100644 --- a/sp_QuickieCache/sp_QuickieCache.sql +++ b/sp_QuickieCache/sp_QuickieCache.sql @@ -429,6 +429,12 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. AND (@ignore_system_databases = 0 OR ISNULL(CONVERT(integer, pa.value), 0) NOT IN (1, 2, 3, 4)) AND ISNULL(CONVERT(integer, pa.value), 0) < 32761 AND (@database_id IS NULL OR CONVERT(integer, pa.value) = @database_id) + /* Honor @start_date / @end_date the same as the statement / + procedure / function / trigger paths below — the filters + were documented as applying to all modes, but this + @find_single_use_plans branch silently ignored them before. */ + AND (@start_date IS NULL OR qs.creation_time >= @start_date) + AND (@end_date IS NULL OR qs.creation_time < @end_date) ORDER BY cp.size_in_bytes DESC OPTION(RECOMPILE, MAXDOP 1); From d85c660664794c4b8757de1cde7f0b8d2b219e43 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Sun, 19 Apr 2026 22:25:17 -0400 Subject: [PATCH 51/52] Normalize @database_id filter on sql_variant pa.value in sp_QuickieCache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Five sites in the sproc compared a sql_variant pa.value to @database_id via explicit CONVERT(integer, pa.value) = @database_id. 
Two outliers did the comparison implicitly (pa.value = @database_id) — one inside a plain SELECT (the @total_plans scan) and one inside the dynamic SQL fragment. Implicit comparisons on sql_variant have less predictable plan shapes; matching the rest of the sproc's convention is cheap. Switched both outliers to CONVERT(integer, pa.value) = @database_id. Verified sproc installs clean. Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_QuickieCache/sp_QuickieCache.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sp_QuickieCache/sp_QuickieCache.sql b/sp_QuickieCache/sp_QuickieCache.sql index 4979a271..5a63364f 100644 --- a/sp_QuickieCache/sp_QuickieCache.sql +++ b/sp_QuickieCache/sp_QuickieCache.sql @@ -584,7 +584,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. WHERE pa.attribute = N'dbid' ) AS pa WHERE 1 = 1 - AND (@database_id IS NULL OR pa.value = @database_id) + AND (@database_id IS NULL OR CONVERT(integer, pa.value) = @database_id) OPTION(RECOMPILE); IF @total_plans > 0 @@ -1107,7 +1107,7 @@ AND ISNULL(pa.value, 0) < 32761' CASE WHEN @database_id IS NOT NULL THEN N' -AND pa.value = @database_id' +AND CONVERT(integer, pa.value) = @database_id' ELSE N'' END + CASE From ccb22a04cec33cb787b5abe6ae574d1e594aa585 Mon Sep 17 00:00:00 2001 From: Erik Darling <2136037+erikdarlingdata@users.noreply.github.com> Date: Mon, 20 Apr 2026 11:46:54 -0400 Subject: [PATCH 52/52] Bump all procs to X.5 for 4/20 release All 11 procs updated: sp_IndexCleanup 2.4 -> 2.5 sp_QueryStoreCleanup 1.4 -> 1.5 sp_QuickieStore 6.4 -> 6.5 sp_PerfCheck 2.4 -> 2.5 sp_HumanEvents 7.4 -> 7.5 sp_HumanEventsBlockViewer 5.4 -> 5.5 sp_PressureDetector 6.4 -> 6.5 sp_LogHunter 3.4 -> 3.5 sp_HealthParser 3.4 -> 3.5 sp_QuickieCache 1.4 -> 1.5 sp_QueryReproBuilder 1.4 -> 1.5 @version_date set to 20260420. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sp_HealthParser/sp_HealthParser.sql | 4 ++-- sp_HumanEvents/sp_HumanEvents.sql | 4 ++-- sp_HumanEvents/sp_HumanEventsBlockViewer.sql | 4 ++-- sp_IndexCleanup/sp_IndexCleanup.sql | 4 ++-- sp_LogHunter/sp_LogHunter.sql | 4 ++-- sp_PerfCheck/sp_PerfCheck.sql | 4 ++-- sp_PressureDetector/sp_PressureDetector.sql | 4 ++-- sp_QueryReproBuilder/sp_QueryReproBuilder.sql | 4 ++-- sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql | 4 ++-- sp_QuickieCache/sp_QuickieCache.sql | 4 ++-- sp_QuickieStore/sp_QuickieStore.sql | 4 ++-- 11 files changed, 22 insertions(+), 22 deletions(-) diff --git a/sp_HealthParser/sp_HealthParser.sql b/sp_HealthParser/sp_HealthParser.sql index 948abcb4..105f98de 100644 --- a/sp_HealthParser/sp_HealthParser.sql +++ b/sp_HealthParser/sp_HealthParser.sql @@ -72,8 +72,8 @@ BEGIN SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT - @version = '3.4', - @version_date = '20260401'; + @version = '3.5', + @version_date = '20260420'; IF @help = 1 BEGIN diff --git a/sp_HumanEvents/sp_HumanEvents.sql b/sp_HumanEvents/sp_HumanEvents.sql index 84af9b14..266a78e4 100644 --- a/sp_HumanEvents/sp_HumanEvents.sql +++ b/sp_HumanEvents/sp_HumanEvents.sql @@ -88,8 +88,8 @@ SET XACT_ABORT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT - @version = '7.4', - @version_date = '20260401'; + @version = '7.5', + @version_date = '20260420'; IF @help = 1 BEGIN diff --git a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql index eb90d291..10318bed 100644 --- a/sp_HumanEvents/sp_HumanEventsBlockViewer.sql +++ b/sp_HumanEvents/sp_HumanEventsBlockViewer.sql @@ -93,8 +93,8 @@ SET XACT_ABORT OFF; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT - @version = '5.4', - @version_date = '20260401'; + @version = '5.5', + @version_date = '20260420'; IF @help = 1 BEGIN diff --git a/sp_IndexCleanup/sp_IndexCleanup.sql b/sp_IndexCleanup/sp_IndexCleanup.sql index b6d3e05a..9a4fb2e3 
100644 --- a/sp_IndexCleanup/sp_IndexCleanup.sql +++ b/sp_IndexCleanup/sp_IndexCleanup.sql @@ -72,8 +72,8 @@ BEGIN SET NOCOUNT ON; BEGIN TRY SELECT - @version = '2.4', - @version_date = '20260401'; + @version = '2.5', + @version_date = '20260420'; IF /* Check SQL Server 2012+ for FORMAT and CONCAT functions */ diff --git a/sp_LogHunter/sp_LogHunter.sql b/sp_LogHunter/sp_LogHunter.sql index 10001bea..c80717b2 100644 --- a/sp_LogHunter/sp_LogHunter.sql +++ b/sp_LogHunter/sp_LogHunter.sql @@ -73,8 +73,8 @@ SET DATEFORMAT MDY; BEGIN SELECT - @version = '3.4', - @version_date = '20260401'; + @version = '3.5', + @version_date = '20260420'; IF @help = 1 BEGIN diff --git a/sp_PerfCheck/sp_PerfCheck.sql b/sp_PerfCheck/sp_PerfCheck.sql index 0a18f970..9ecf71ab 100644 --- a/sp_PerfCheck/sp_PerfCheck.sql +++ b/sp_PerfCheck/sp_PerfCheck.sql @@ -64,8 +64,8 @@ BEGIN Set version information */ SELECT - @version = N'2.4', - @version_date = N'20260401'; + @version = N'2.5', + @version_date = N'20260420'; /* Help section, for help. 
diff --git a/sp_PressureDetector/sp_PressureDetector.sql b/sp_PressureDetector/sp_PressureDetector.sql index 42410274..f858ed79 100644 --- a/sp_PressureDetector/sp_PressureDetector.sql +++ b/sp_PressureDetector/sp_PressureDetector.sql @@ -78,8 +78,8 @@ SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SET LANGUAGE us_english; SELECT - @version = '6.4', - @version_date = '20260401'; + @version = '6.5', + @version_date = '20260420'; IF @help = 1 diff --git a/sp_QueryReproBuilder/sp_QueryReproBuilder.sql b/sp_QueryReproBuilder/sp_QueryReproBuilder.sql index 7f15e28c..880602ba 100644 --- a/sp_QueryReproBuilder/sp_QueryReproBuilder.sql +++ b/sp_QueryReproBuilder/sp_QueryReproBuilder.sql @@ -83,8 +83,8 @@ BEGIN TRY /*Version*/ SELECT - @version = '1.4', - @version_date = '20260401'; + @version = '1.5', + @version_date = '20260420'; /*Help*/ IF @help = 1 diff --git a/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql b/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql index 6e3d1384..9d5cdbb7 100644 --- a/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql +++ b/sp_QueryStoreCleanup/sp_QueryStoreCleanup.sql @@ -53,8 +53,8 @@ BEGIN SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT - @version = '1.4', - @version_date = '20260401'; + @version = '1.5', + @version_date = '20260420'; /* Help section diff --git a/sp_QuickieCache/sp_QuickieCache.sql b/sp_QuickieCache/sp_QuickieCache.sql index 5a63364f..e08f371d 100644 --- a/sp_QuickieCache/sp_QuickieCache.sql +++ b/sp_QuickieCache/sp_QuickieCache.sql @@ -76,8 +76,8 @@ BEGIN SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT - @version = '1.4', - @version_date = '20260401'; + @version = '1.5', + @version_date = '20260420'; /* ╔══════════════════════════════════════════════════╗ diff --git a/sp_QuickieStore/sp_QuickieStore.sql b/sp_QuickieStore/sp_QuickieStore.sql index e88c779c..ffa459bf 100644 --- a/sp_QuickieStore/sp_QuickieStore.sql +++ b/sp_QuickieStore/sp_QuickieStore.sql @@ -126,8 +126,8 @@ BEGIN TRY These are for your 
outputs. */ SELECT - @version = '6.4', - @version_date = '20260401'; + @version = '6.5', + @version_date = '20260420'; /* Helpful section! For help.