diff --git a/Workbooks/Database watcher/Azure SQL Database/database/activity/activity.workbook b/Workbooks/Database watcher/Azure SQL Database/database/activity/activity.workbook index b1e2411c0d..0459a0f33e 100644 --- a/Workbooks/Database watcher/Azure SQL Database/database/activity/activity.workbook +++ b/Workbooks/Database watcher/Azure SQL Database/database/activity/activity.workbook @@ -311,7 +311,7 @@ "type": 3, "content": { "version": "KqlItem/1.0", - "query": "{\"version\":\"AzureDataExplorerQuery/1.0\",\"queryText\":\"let max_missing_indexes_per_query_hash = 30;\\r\\n// The missing indexes table may not exist. Work around by using a same-schema stub in a fuzzy union.\\r\\nlet missing_indexes_stub = view() {\\r\\nprint query_hash = \\\"\\\", missing_index_label = \\\"\\\", create_index_statements = \\\"\\\"\\r\\n| where isnotempty(query_hash)\\r\\n};\\r\\nlet missing_indexes_table = view() {\\r\\nsqldb_database_missing_indexes\\r\\n| where sample_time_utc between ({timeRange:start} .. {timeRange:end})\\r\\n| where sample_time_utc <= todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where logical_server_name =~ @\\\"{serverName}\\\"\\r\\n| where database_name == @\\\"{databaseName}\\\"\\r\\n| where ({haReplica} and replica_type == \\\"HA secondary\\\") or (not ({haReplica}) and replica_type != \\\"HA secondary\\\")\\r\\n| where grain_type == \\\"query_plan\\\"\\r\\n// Use the last missing index sample within the relevant interval\\r\\n| summarize arg_max(sample_time_utc, schema_name, object_name, database_engine_start_time_utc, avg_user_impact, equality_columns, inequality_columns, included_columns, user_seeks, user_scans, last_user_seek_utc, last_user_scan_utc)\\r\\n by logical_server_name, database_name, query_hash, query_plan_hash\\r\\n// Remove dups that may be present in different plans\\r\\n| summarize sample_time_utc = min(sample_time_utc),\\r\\n database_engine_start_time_utc = min(database_engine_start_time_utc),\\r\\n avg_user_impact = max(avg_user_impact),\\r\\n user_seeks = sum(user_seeks),\\r\\n user_scans = sum(user_scans),\\r\\n last_user_seek_utc = max(last_user_seek_utc),\\r\\n last_user_scan_utc = max(last_user_scan_utc),\\r\\n query_plan_hash = take_any(query_plan_hash),\\r\\n count_plans = dcount(query_plan_hash)\\r\\n by logical_server_name, database_name, query_hash, schema_name, object_name, equality_columns, inequality_columns, included_columns\\r\\n| extend duration_since_startup_seconds = 1s * datetime_diff(\\\"second\\\", sample_time_utc, database_engine_start_time_utc),\\r\\n query_plan_hash = iif(count_plans == 1, tostring(query_plan_hash), strcat(query_plan_hash, \\\" (+\\\", tostring(count_plans - 1), \\\")\\\"))\\r\\n| where not(schema_name has_any(\\\"[\\\",\\\"]\\\")) and not(object_name has_any(\\\"[\\\",\\\"]\\\"))\\r\\n| extend create_index_statement = strcat(\\r\\n\\\"/* \\\",\\r\\n\\\"\\\\r\\\\nLogical server: \\\", logical_server_name,\\r\\n\\\"\\\\r\\\\nDatabase: \\\", database_name,\\r\\n\\\"\\\\r\\\\nQuery hash: \\\", query_hash,\\r\\n\\\"\\\\r\\\\nQuery plan hash: \\\", query_plan_hash,\\r\\n\\\"\\\\r\\\\nAverage impact (%): \\\", avg_user_impact,\\r\\n\\\"\\\\r\\\\nSeeks: \\\", user_seeks,\\r\\n\\\"\\\\r\\\\nScans: \\\", user_scans,\\r\\n\\\"\\\\r\\\\nLast seek time (UTC): \\\", iif(isnotempty(last_user_seek_utc), format_datetime(last_user_seek_utc, \\\"yyyy-MM-dd HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nLast scan time (UTC): \\\", iif(isnotempty(last_user_scan_utc), format_datetime(last_user_scan_utc, \\\"yyyy-MM-dd 
HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nInterval duration: \\\", strcat(format_timespan(duration_since_startup_seconds, \\\"d\\\"), \\\" days, \\\", format_timespan(duration_since_startup_seconds, \\\"hh\\\"), \\\" hours, \\\", format_timespan(duration_since_startup_seconds, \\\"m\\\"), \\\" minutes\\\"),\\r\\n\\\"\\\\r\\\\n*/\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX [replace-with-index-name] ON [\\\", schema_name, \\\"].[\\\", object_name, \\\"]\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\niif(isnotempty(equality_columns), strcat(\\\"\\\\r\\\\n\\\", equality_columns, iif(isnotempty(inequality_columns), \\\",\\\", \\\"\\\"), \\\" /* equality columns */\\\"), \\\"\\\"),\\r\\niif(isnotempty(inequality_columns), strcat(\\\"\\\\r\\\\n\\\", inequality_columns, \\\" /* inequality columns */\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\n)\\\",\\r\\niif(isnotempty(included_columns), strcat(\\\"\\\\r\\\\nINCLUDE (\\\", included_columns, \\\")\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\nWITH /* Adjust index options as needed */\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\n\\\"\\\\r\\\\nMAXDOP = 8,\\\",\\r\\n\\\"\\\\r\\\\nONLINE = ON (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 5 MINUTES, ABORT_AFTER_WAIT = SELF)),\\\",\\r\\n\\\"\\\\r\\\\n-- RESUMABLE = ON,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = ROW,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = PAGE,\\\",\\r\\n\\\"\\\\r\\\\nSORT_IN_TEMPDB = ON\\\",\\r\\n\\\"\\\\r\\\\n);\\\",\\r\\n\\\"\\\\r\\\\n\\\"\\r\\n)\\r\\n| summarize count_missing_indexes = min_of(count(), max_missing_indexes_per_query_hash),\\r\\n statements = make_list(create_index_statement, max_missing_indexes_per_query_hash) by query_hash\\r\\n| project query_hash,\\r\\n count_missing_indexes,\\r\\n create_index_statements = strcat(\\r\\n\\\"/*\\\",\\r\\n\\\"\\\\r\\\\nIMPORTANT:\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX statements in this script are tentative suggestions.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nBefore creating indexes based on these suggestions:\\\",\\r\\n\\\"\\\\r\\\\n1. Examine existing indexes. If possible, modify them instead of creating a new index.\\\",\\r\\n\\\"\\\\r\\\\n2. When there are multiple similar index suggestions, consider combining them into one index.\\\",\\r\\n\\\"\\\\r\\\\n3. Always test and validate that a new or modified index does improve performance.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nAvoid creating many little-used indexes. 
That slows down inserts, updates,\\\",\\r\\n\\\"\\\\r\\\\nand deletes substantially, and increases storage and memory consumption.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nFor more information, see https://go.microsoft.com/fwlink/?linkid=2245704.\\\",\\r\\n\\\"\\\\r\\\\n*/\\\\r\\\\n\\\\r\\\\n\\\",\\r\\nstrcat_array(statements, \\\"\\\\r\\\\n\\\")\\r\\n)\\r\\n};\\r\\nlet missing_indexes = (\\r\\nunion isfuzzy=true\\r\\n(missing_indexes_stub),\\r\\n(missing_indexes_table)\\r\\n);\\r\\n// The sample to be displayed\\r\\nlet current_snapshot = materialize(\\r\\nsqldb_database_active_sessions\\r\\n| where sample_time_utc == todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where logical_server_name =~ @\\\"{serverName}\\\"\\r\\n| where database_name == @\\\"{databaseName}\\\"\\r\\n| where ({haReplica} and replica_type == \\\"HA secondary\\\") or (not ({haReplica}) and replica_type != \\\"HA secondary\\\")\\r\\n| project-away logical_server_name, database_name, logical_database_id, physical_database_id, replica_type\\r\\n| extend blocking_session_id = iif(\\r\\n blocking_session_id in (0, -4, -5), // Do not treat long latch waits as relevant blocking\\r\\n int(null),\\r\\n blocking_session_id\\r\\n )\\r\\n);\\r\\n// Blocking-related columns\\r\\nlet b = materialize(\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, blocking_session_id\\r\\n);\\r\\n// Assumption: blocking chains are at most 9 levels deep. If proven incorrect, introduce additional lookups\\r\\nlet head_blockers = materialize(\\r\\nb\\r\\n| lookup (b | project session_id, blocking_session_id2 = blocking_session_id) on $left.blocking_session_id == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id3 = blocking_session_id) on $left.blocking_session_id2 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id4 = blocking_session_id) on $left.blocking_session_id3 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id5 = blocking_session_id) on $left.blocking_session_id4 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id6 = blocking_session_id) on $left.blocking_session_id5 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id7 = blocking_session_id) on $left.blocking_session_id6 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id8 = blocking_session_id) on $left.blocking_session_id7 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id9 = blocking_session_id) on $left.blocking_session_id8 == $right.session_id\\r\\n| project session_id,\\r\\n request_id,\\r\\n head_blocking_session_id = coalesce(\\r\\n blocking_session_id9,\\r\\n blocking_session_id8,\\r\\n blocking_session_id7,\\r\\n blocking_session_id6,\\r\\n blocking_session_id5,\\r\\n blocking_session_id4,\\r\\n blocking_session_id3,\\r\\n blocking_session_id2,\\r\\n blocking_session_id\\r\\n )\\r\\n| where isnotempty(head_blocking_session_id)\\r\\n);\\r\\n// column labels for each grid\\r\\nlet column_labels_string = dynamic(\\r\\n[\\r\\n\\\"session_status\\\",\\\"workload_group_name\\\",\\\"connection_id\\\",\\\"encrypt_option\\\",\\\"wait_resource\\\",\\\"transaction_isolation_level\\\",\\\"query_hash\\\",\\\"query_plan_hash\\\",\\\"session_cpu_time\\\",\\\"session_duration\\\",\\\"request_cpu_time\\\",\\\"sql_module_name\\\"\\r\\n]);\\r\\nlet column_labels_number_count = 
dynamic(\\r\\n[\\r\\n\\\"session_logical_reads\\\",\\\"session_writes\\\",\\\"session_row_count\\\",\\\"request_row_count\\\",\\\"dop\\\",\\\"parallel_worker_count\\\",\\\"request_writes\\\"\\r\\n]);\\r\\nlet column_labels_number_id = dynamic(\\r\\n[\\r\\n\\\"database_id\\\",\\\"workload_group_id\\\",\\\"request_id\\\",\\\"sql_module_object_id\\\",\\\"blocking_session_id\\\"\\r\\n]);\\r\\nlet column_labels_number_kb = dynamic(\\r\\n[\\r\\n\\\"granted_memory\\\",\\\"requested_memory\\\",\\\"max_used_memory\\\",\\\"ideal_memory\\\"\\r\\n]);\\r\\nlet column_labels_datetime = dynamic(\\r\\n[\\r\\n\\\"login_time\\\",\\\"request_start_time\\\"\\r\\n]);\\r\\nlet column_labels_task_counts = dynamic(\\r\\n[\\r\\n\\\"pending\\\", \\\"runnable\\\", \\\"running\\\", \\\"suspended\\\", \\\"done\\\", \\\"spinloop\\\"\\r\\n]);\\r\\n// result set for strings grid\\r\\nlet session_details_string = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_status, workload_group_name, connection_id, encrypt_option, wait_resource, transaction_isolation_level, query_hash, query_plan_hash, session_cpu_time = format_timespan(session_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), session_duration = format_timespan(session_total_elapsed_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), request_cpu_time = format_timespan(request_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), sql_module_name\\r\\n| project session_id, request_id, columns = pack_all() // put all columns into a property bag\\r\\n| mv-apply column_label = column_labels_string to typeof(string) on (project column_label, column_value = tostring(columns[column_label])) // create a row for each name/value pair\\r\\n| project-away columns\\r\\n| sort by column_label asc //make_list later will preserve sort order\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value) // introduce \\\"Property\\\" and \\\"Value\\\" column names\\r\\n| summarize details_string = dynamic_to_json(make_list(c)) by session_id, request_id // for each session_id and request_id, aggregate rowset into a json array of rows, to be used as grid source\\r\\n);\\r\\n// result set for count numbers grid\\r\\nlet session_details_number_count = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_logical_reads, session_writes, session_row_count, request_row_count, dop, parallel_worker_count, request_writes\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_count to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_count = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for id numbers grid\\r\\nlet session_details_number_id = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, database_id, workload_group_id, sql_module_object_id, blocking_session_id\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_id to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_id = dynamic_to_json(make_list(c)) by 
session_id, request_id\\r\\n);\\r\\n// result set for byte numbers grid\\r\\nlet session_details_number_kb = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, granted_memory = memory_grant_kb, requested_memory = requested_memory_kb, max_used_memory = max_used_memory_kb, ideal_memory = ideal_memory_kb\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_kb to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_kb = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for datetime grid\\r\\nlet session_details_datetime = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, login_time, request_start_time\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_datetime to typeof(string) on (project column_label, column_value = todatetime(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_datetime = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\nlet session_details_task_counts = ( \\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, pending = pending_task_count, runnable = runnable_task_count, running = running_task_count, suspended = suspended_task_count, done = done_task_count, spinloop = spinloop_task_count\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_task_counts to typeof(string) on (project column_label, column_value = toint(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Task type\\\", column_label, \\\"Count\\\", column_value)\\r\\n| summarize task_counts = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// return last snapshot with json arrays for each detail grid\\r\\ncurrent_snapshot\\r\\n| lookup head_blockers on $left.session_id == $right.session_id and $left.request_id == $right.request_id\\r\\n| join session_details_string on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_count on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_id on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_kb on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_datetime on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_task_counts on session_id and $left.request_id == $right.request_id\\r\\n| join kind=leftouter (\\r\\n head_blockers\\r\\n | summarize count_blocked_sessions = count() by head_blocking_session_id\\r\\n ) on $left.session_id == $right.head_blocking_session_id\\r\\n| lookup missing_indexes on query_hash\\r\\n| project request_duration = format_timespan(1ms * request_total_elapsed_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n session_id,\\r\\n request_status,\\r\\n command,\\r\\n input_buffer_text,\\r\\n blocked_by = head_blocking_session_id,\\r\\n count_blocked_sessions,\\r\\n count_missing_indexes,\\r\\n request_normalized_cpu_time = toreal(request_cpu_time_ms) / 
toreal(request_total_elapsed_time_ms),\\r\\n request_normalized_logical_reads = toreal(request_logical_reads) / toreal(request_total_elapsed_time_ms),\\r\\n dop,\\r\\n wait_type,\\r\\n wait_time = format_timespan(1ms * iif(wait_time_ms != 0, wait_time_ms, int(null)), \\\"d.hh:mm:ss.fff\\\"),\\r\\n open_transaction_count,\\r\\n request_id,\\r\\n percent_complete = iif(percent_complete == 0, real(null), percent_complete),\\r\\n tempdb_allocations_kb,\\r\\n tempdb_current_kb,\\r\\n sql_module_name,\\r\\n login_name,\\r\\n host_name,\\r\\n program_name,\\r\\n client_net_address,\\r\\n request_cpu_time = format_timespan(1ms * request_cpu_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n request_total_elapsed_time_ms,\\r\\n request_logical_reads,\\r\\n statement_text_json = dynamic_to_json(pack(\\\"statement_text\\\", statement_text)),\\r\\n input_buffer_text_json = dynamic_to_json(pack(\\\"input_buffer_text\\\", input_buffer_text)),\\r\\n session_wait_stats = iif(isempty(session_wait_stats), '[{\\\"wait_type\\\":\\\"\\\",\\\"wait_time_ms\\\":\\\"\\\",\\\"signal_wait_time_ms\\\":\\\"\\\",\\\"max_wait_time_ms\\\":\\\"\\\",\\\"waiting_tasks_count\\\":\\\"\\\"}]', session_wait_stats),\\r\\n details_datetime,\\r\\n details_number_id,\\r\\n details_number_kb,\\r\\n details_string,\\r\\n details_number_count,\\r\\n task_counts,\\r\\n create_index_statements\\r\\n| extend request_normalized_cpu_time = iif(isfinite(request_normalized_cpu_time), request_normalized_cpu_time, real(null)),\\r\\n request_normalized_logical_reads = iif(isfinite(request_normalized_logical_reads), request_normalized_logical_reads, real(null))\\r\\n| where \\\"{blockedFilter}\\\" == \\\"all\\\"\\r\\n or\\r\\n (\\\"{blockedFilter}\\\" == \\\"blocked\\\" and (isnotnull(blocked_by) or isnotnull(count_blocked_sessions)))\\r\\n| sort by count_blocked_sessions desc nulls last, request_total_elapsed_time_ms desc nulls last // blockers first, followed by sessions with longest running requests, then longest running sessions\\r\\n\",\"clusterName\":\"{adxClusterUri}\",\"databaseName\":\"{adxDatabase}\"}", + "query": "{\"version\":\"AzureDataExplorerQuery/1.0\",\"queryText\":\"let max_missing_indexes_per_query_hash = 30;\\r\\n// The missing indexes table may not exist. Work around by using a same-schema stub in a fuzzy union.\\r\\nlet missing_indexes_stub = view() {\\r\\nprint query_hash = \\\"\\\", missing_index_label = \\\"\\\", create_index_statements = \\\"\\\"\\r\\n| where isnotempty(query_hash)\\r\\n};\\r\\nlet missing_indexes_table = view() {\\r\\nsqldb_database_missing_indexes\\r\\n| where sample_time_utc between ({timeRange:start} .. 
{timeRange:end})\\r\\n| where sample_time_utc <= todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where logical_server_name =~ @\\\"{serverName}\\\"\\r\\n| where database_name == @\\\"{databaseName}\\\"\\r\\n| where ({haReplica} and replica_type == \\\"HA secondary\\\") or (not ({haReplica}) and replica_type != \\\"HA secondary\\\")\\r\\n| where grain_type == \\\"query_plan\\\"\\r\\n// Use the last missing index sample within the relevant interval\\r\\n| summarize arg_max(sample_time_utc, schema_name, object_name, database_engine_start_time_utc, avg_user_impact, equality_columns, inequality_columns, included_columns, user_seeks, user_scans, last_user_seek_utc, last_user_scan_utc)\\r\\n by logical_server_name, database_name, query_hash, query_plan_hash\\r\\n// Remove dups that may be present in different plans\\r\\n| summarize sample_time_utc = min(sample_time_utc),\\r\\n database_engine_start_time_utc = min(database_engine_start_time_utc),\\r\\n avg_user_impact = max(avg_user_impact),\\r\\n user_seeks = sum(user_seeks),\\r\\n user_scans = sum(user_scans),\\r\\n last_user_seek_utc = max(last_user_seek_utc),\\r\\n last_user_scan_utc = max(last_user_scan_utc),\\r\\n query_plan_hash = take_any(query_plan_hash),\\r\\n count_plans = dcount(query_plan_hash)\\r\\n by logical_server_name, database_name, query_hash, schema_name, object_name, equality_columns, inequality_columns, included_columns\\r\\n| extend duration_since_startup_seconds = 1s * datetime_diff(\\\"second\\\", sample_time_utc, database_engine_start_time_utc),\\r\\n query_plan_hash = iif(count_plans == 1, tostring(query_plan_hash), strcat(query_plan_hash, \\\" (+\\\", tostring(count_plans - 1), \\\")\\\"))\\r\\n| where not(schema_name has_any(\\\"[\\\",\\\"]\\\")) and not(object_name has_any(\\\"[\\\",\\\"]\\\"))\\r\\n| extend create_index_statement = strcat(\\r\\n\\\"/* \\\",\\r\\n\\\"\\\\r\\\\nLogical server: \\\", logical_server_name,\\r\\n\\\"\\\\r\\\\nDatabase: \\\", database_name,\\r\\n\\\"\\\\r\\\\nQuery hash: \\\", query_hash,\\r\\n\\\"\\\\r\\\\nQuery plan hash: \\\", query_plan_hash,\\r\\n\\\"\\\\r\\\\nAverage impact (%): \\\", avg_user_impact,\\r\\n\\\"\\\\r\\\\nSeeks: \\\", user_seeks,\\r\\n\\\"\\\\r\\\\nScans: \\\", user_scans,\\r\\n\\\"\\\\r\\\\nLast seek time (UTC): \\\", iif(isnotempty(last_user_seek_utc), format_datetime(last_user_seek_utc, \\\"yyyy-MM-dd HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nLast scan time (UTC): \\\", iif(isnotempty(last_user_scan_utc), format_datetime(last_user_scan_utc, \\\"yyyy-MM-dd HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nInterval duration: \\\", strcat(format_timespan(duration_since_startup_seconds, \\\"d\\\"), \\\" days, \\\", format_timespan(duration_since_startup_seconds, \\\"hh\\\"), \\\" hours, \\\", format_timespan(duration_since_startup_seconds, \\\"m\\\"), \\\" minutes\\\"),\\r\\n\\\"\\\\r\\\\n*/\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX [replace-with-index-name] ON [\\\", schema_name, \\\"].[\\\", object_name, \\\"]\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\niif(isnotempty(equality_columns), strcat(\\\"\\\\r\\\\n\\\", equality_columns, iif(isnotempty(inequality_columns), \\\",\\\", \\\"\\\"), \\\" /* equality columns */\\\"), \\\"\\\"),\\r\\niif(isnotempty(inequality_columns), strcat(\\\"\\\\r\\\\n\\\", inequality_columns, \\\" /* inequality columns */\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\n)\\\",\\r\\niif(isnotempty(included_columns), strcat(\\\"\\\\r\\\\nINCLUDE (\\\", included_columns, \\\")\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\nWITH /* Adjust index options as needed 
*/\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\n\\\"\\\\r\\\\nMAXDOP = 8,\\\",\\r\\n\\\"\\\\r\\\\nONLINE = ON (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 5 MINUTES, ABORT_AFTER_WAIT = SELF)),\\\",\\r\\n\\\"\\\\r\\\\n-- RESUMABLE = ON,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = ROW,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = PAGE,\\\",\\r\\n\\\"\\\\r\\\\nSORT_IN_TEMPDB = ON\\\",\\r\\n\\\"\\\\r\\\\n);\\\",\\r\\n\\\"\\\\r\\\\n\\\"\\r\\n)\\r\\n| summarize count_missing_indexes = min_of(count(), max_missing_indexes_per_query_hash),\\r\\n statements = make_list(create_index_statement, max_missing_indexes_per_query_hash) by query_hash\\r\\n| project query_hash,\\r\\n count_missing_indexes,\\r\\n create_index_statements = strcat(\\r\\n\\\"/*\\\",\\r\\n\\\"\\\\r\\\\nIMPORTANT:\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX statements in this script are tentative suggestions.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nBefore creating indexes based on these suggestions:\\\",\\r\\n\\\"\\\\r\\\\n1. Examine existing indexes. If possible, modify them instead of creating a new index.\\\",\\r\\n\\\"\\\\r\\\\n2. When there are multiple similar index suggestions, consider combining them into one index.\\\",\\r\\n\\\"\\\\r\\\\n3. Always test and validate that a new or modified index does improve performance.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nAvoid creating many little-used indexes. That slows down inserts, updates,\\\",\\r\\n\\\"\\\\r\\\\nand deletes substantially, and increases storage and memory consumption.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nFor more information, see https://go.microsoft.com/fwlink/?linkid=2245704.\\\",\\r\\n\\\"\\\\r\\\\n*/\\\\r\\\\n\\\\r\\\\n\\\",\\r\\nstrcat_array(statements, \\\"\\\\r\\\\n\\\")\\r\\n)\\r\\n};\\r\\nlet missing_indexes = (\\r\\nunion isfuzzy=true\\r\\n(missing_indexes_stub),\\r\\n(missing_indexes_table)\\r\\n);\\r\\n// The sample to be displayed\\r\\nlet current_snapshot = materialize(\\r\\nsqldb_database_active_sessions\\r\\n| where sample_time_utc == todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where logical_server_name =~ @\\\"{serverName}\\\"\\r\\n| where database_name == @\\\"{databaseName}\\\"\\r\\n| where ({haReplica} and replica_type == \\\"HA secondary\\\") or (not ({haReplica}) and replica_type != \\\"HA secondary\\\")\\r\\n| project-away logical_server_name, database_name, logical_database_id, physical_database_id, replica_type\\r\\n| extend blocking_session_id = iif(\\r\\n blocking_session_id in (0, -4, -5), // Do not treat long latch waits as relevant blocking\\r\\n int(null),\\r\\n blocking_session_id\\r\\n )\\r\\n);\\r\\n// Blocking-related columns\\r\\nlet b = materialize(\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, blocking_session_id\\r\\n);\\r\\n// Assumption: blocking chains are at most 9 levels deep. 
If proven incorrect, introduce additional lookups\\r\\nlet head_blockers = materialize(\\r\\nb\\r\\n| lookup (b | project session_id, blocking_session_id2 = blocking_session_id) on $left.blocking_session_id == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id3 = blocking_session_id) on $left.blocking_session_id2 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id4 = blocking_session_id) on $left.blocking_session_id3 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id5 = blocking_session_id) on $left.blocking_session_id4 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id6 = blocking_session_id) on $left.blocking_session_id5 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id7 = blocking_session_id) on $left.blocking_session_id6 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id8 = blocking_session_id) on $left.blocking_session_id7 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id9 = blocking_session_id) on $left.blocking_session_id8 == $right.session_id\\r\\n| project session_id,\\r\\n request_id,\\r\\n head_blocking_session_id = coalesce(\\r\\n blocking_session_id9,\\r\\n blocking_session_id8,\\r\\n blocking_session_id7,\\r\\n blocking_session_id6,\\r\\n blocking_session_id5,\\r\\n blocking_session_id4,\\r\\n blocking_session_id3,\\r\\n blocking_session_id2,\\r\\n blocking_session_id\\r\\n )\\r\\n| where isnotempty(head_blocking_session_id)\\r\\n);\\r\\n// column labels for each grid\\r\\nlet column_labels_string = dynamic(\\r\\n[\\r\\n\\\"session_status\\\",\\\"workload_group_name\\\",\\\"connection_id\\\",\\\"encrypt_option\\\",\\\"wait_resource\\\",\\\"transaction_isolation_level\\\",\\\"query_hash\\\",\\\"query_plan_hash\\\",\\\"session_cpu_time\\\",\\\"session_duration\\\",\\\"request_cpu_time\\\",\\\"sql_module_name\\\"\\r\\n]);\\r\\nlet column_labels_number_count = dynamic(\\r\\n[\\r\\n\\\"session_logical_reads\\\",\\\"session_writes\\\",\\\"session_row_count\\\",\\\"request_row_count\\\",\\\"dop\\\",\\\"parallel_worker_count\\\",\\\"request_writes\\\"\\r\\n]);\\r\\nlet column_labels_number_id = dynamic(\\r\\n[\\r\\n\\\"database_id\\\",\\\"workload_group_id\\\",\\\"request_id\\\",\\\"sql_module_object_id\\\",\\\"blocking_session_id\\\"\\r\\n]);\\r\\nlet column_labels_number_kb = dynamic(\\r\\n[\\r\\n\\\"granted_memory\\\",\\\"requested_memory\\\",\\\"max_used_memory\\\",\\\"ideal_memory\\\"\\r\\n]);\\r\\nlet column_labels_datetime = dynamic(\\r\\n[\\r\\n\\\"login_time\\\",\\\"request_start_time\\\"\\r\\n]);\\r\\nlet column_labels_task_counts = dynamic(\\r\\n[\\r\\n\\\"pending\\\", \\\"runnable\\\", \\\"running\\\", \\\"suspended\\\", \\\"done\\\", \\\"spinloop\\\"\\r\\n]);\\r\\n// result set for strings grid\\r\\nlet session_details_string = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_status, workload_group_name, connection_id, encrypt_option, wait_resource, transaction_isolation_level, query_hash, query_plan_hash, session_cpu_time = format_timespan(session_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), session_duration = format_timespan(session_total_elapsed_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), request_cpu_time = format_timespan(request_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), sql_module_name\\r\\n| project session_id, request_id, columns = pack_all() // put all columns into a property bag\\r\\n| mv-apply column_label = column_labels_string to 
typeof(string) on (project column_label, column_value = tostring(columns[column_label])) // create a row for each name/value pair\\r\\n| project-away columns\\r\\n| sort by column_label asc //make_list later will preserve sort order\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value) // introduce \\\"Property\\\" and \\\"Value\\\" column names\\r\\n| summarize details_string = dynamic_to_json(make_list(c)) by session_id, request_id // for each session_id and request_id, aggregate rowset into a json array of rows, to be used as grid source\\r\\n);\\r\\n// result set for count numbers grid\\r\\nlet session_details_number_count = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_logical_reads, session_writes, session_row_count, request_row_count, dop, parallel_worker_count, request_writes\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_count to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_count = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for id numbers grid\\r\\nlet session_details_number_id = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, database_id, workload_group_id, sql_module_object_id, blocking_session_id\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_id to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_id = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for byte numbers grid\\r\\nlet session_details_number_kb = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, granted_memory = memory_grant_kb, requested_memory = requested_memory_kb, max_used_memory = max_used_memory_kb, ideal_memory = ideal_memory_kb\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_kb to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_kb = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for datetime grid\\r\\nlet session_details_datetime = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, login_time, request_start_time\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_datetime to typeof(string) on (project column_label, column_value = todatetime(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_datetime = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\nlet session_details_task_counts = ( \\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, 
pending = pending_task_count, runnable = runnable_task_count, running = running_task_count, suspended = suspended_task_count, done = done_task_count, spinloop = spinloop_task_count\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_task_counts to typeof(string) on (project column_label, column_value = toint(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Task type\\\", column_label, \\\"Count\\\", column_value)\\r\\n| summarize task_counts = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// return last snapshot with json arrays for each detail grid\\r\\ncurrent_snapshot\\r\\n| lookup head_blockers on $left.session_id == $right.session_id and $left.request_id == $right.request_id\\r\\n| join session_details_string on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_count on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_id on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_kb on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_datetime on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_task_counts on session_id and $left.request_id == $right.request_id\\r\\n| join kind=leftouter (\\r\\n head_blockers\\r\\n | summarize count_blocked_sessions = count() by head_blocking_session_id\\r\\n ) on $left.session_id == $right.head_blocking_session_id\\r\\n| lookup missing_indexes on query_hash\\r\\n| project request_duration = format_timespan(1ms * request_total_elapsed_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n session_id,\\r\\n request_status,\\r\\n command,\\r\\n input_buffer_text,\\r\\n blocked_by = head_blocking_session_id,\\r\\n count_blocked_sessions,\\r\\n count_missing_indexes,\\r\\n request_normalized_cpu_time = toreal(request_cpu_time_ms) / toreal(request_total_elapsed_time_ms),\\r\\n request_normalized_logical_reads = toreal(request_logical_reads) / toreal(request_total_elapsed_time_ms),\\r\\n dop,\\r\\n wait_type,\\r\\n wait_time = format_timespan(1ms * iif(wait_time_ms != 0, wait_time_ms, int(null)), \\\"d.hh:mm:ss.fff\\\"),\\r\\n open_transaction_count,\\r\\n request_id,\\r\\n percent_complete = iif(percent_complete == 0, real(null), percent_complete),\\r\\n tempdb_allocations_kb,\\r\\n tempdb_current_kb,\\r\\n sql_module_name,\\r\\n login_name,\\r\\n host_name,\\r\\n program_name,\\r\\n client_net_address,\\r\\n request_cpu_time = format_timespan(1ms * request_cpu_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n request_total_elapsed_time_ms,\\r\\n request_logical_reads,\\r\\n statement_text_json = dynamic_to_json(pack(\\\"statement_text\\\", statement_text)),\\r\\n input_buffer_text_json = dynamic_to_json(pack(\\\"input_buffer_text\\\", input_buffer_text)),\\r\\n session_wait_stats = iif(isempty(session_wait_stats), '[{\\\"wait_type\\\":\\\"\\\",\\\"wait_time_ms\\\":\\\"\\\",\\\"signal_wait_time_ms\\\":\\\"\\\",\\\"max_wait_time_ms\\\":\\\"\\\",\\\"waiting_tasks_count\\\":\\\"\\\"}]', session_wait_stats),\\r\\n details_datetime,\\r\\n details_number_id,\\r\\n details_number_kb,\\r\\n details_string,\\r\\n details_number_count,\\r\\n task_counts,\\r\\n create_index_statements,\\r\\n blocked_tooltip_text = iif(isnull(head_blocking_session_id), \\\"\\\", strcat(\\\"Session \\\", tostring(session_id), \\\" is blocked. 
The head blocker of the blocking chain is session \\\", tostring(head_blocking_session_id), \\\".\\\")),\\r\\n           blocker_tooltip_text = iif(count_blocked_sessions > 0, strcat(\\\"Session \\\", session_id, \\\" is a head blocker in a blocking chain. It blocks \\\", count_blocked_sessions, \\\" other session(s).\\\"), \\\"\\\")\\r\\n| extend request_normalized_cpu_time = iif(isfinite(request_normalized_cpu_time), request_normalized_cpu_time, real(null)),\\r\\n           request_normalized_logical_reads = iif(isfinite(request_normalized_logical_reads), request_normalized_logical_reads, real(null))\\r\\n| where \\\"{blockedFilter}\\\" == \\\"all\\\"\\r\\n      or\\r\\n      (\\\"{blockedFilter}\\\" == \\\"blocked\\\" and (isnotnull(blocked_by) or isnotnull(count_blocked_sessions)))\\r\\n| sort by count_blocked_sessions desc nulls last, request_total_elapsed_time_ms desc nulls last // blockers first, followed by sessions with longest running requests, then longest running sessions\\r\\n\",\"clusterName\":\"{adxClusterUri}\",\"databaseName\":\"{adxDatabase}\"}",
 "size": 0,
 "showAnalytics": true,
 "title": "Active sessions",
@@ -549,7 +549,7 @@
 "customColumnWidthSetting": "15ch"
 },
 "tooltipFormat": {
- "tooltip": "Session [\"session_id\"] is blocked. The head blocker of the blocking chain is session [\"blocked_by\"]."
+ "tooltip": "[\"blocked_tooltip_text\"]"
 }
 },
 {
@@ -574,7 +574,7 @@
 "customColumnWidthSetting": "12ch"
 },
 "tooltipFormat": {
- "tooltip": "Session [\"session_id\"] is a head blocker in a blocking chain. It blocks [\"count_blocked_sessions\"] other session(s)."
+ "tooltip": "[\"blocker_tooltip_text\"]"
 }
 },
 {
@@ -795,6 +795,14 @@
 {
 "columnMatch": "create_index_statements",
 "formatter": 5
+ },
+ {
+ "columnMatch": "blocked_tooltip_text",
+ "formatter": 5
+ },
+ {
+ "columnMatch": "blocker_tooltip_text",
+ "formatter": 5
 }
 ],
 "filter": true,
diff --git a/Workbooks/Database watcher/Azure SQL Database/elastic pool/activity/activity.workbook b/Workbooks/Database watcher/Azure SQL Database/elastic pool/activity/activity.workbook
index be726b074b..b382651a99 100644
--- a/Workbooks/Database watcher/Azure SQL Database/elastic pool/activity/activity.workbook
+++ b/Workbooks/Database watcher/Azure SQL Database/elastic pool/activity/activity.workbook
@@ -305,7 +305,7 @@
 "type": 3,
 "content": {
 "version": "KqlItem/1.0",
- "query": "{\"version\":\"AzureDataExplorerQuery/1.0\",\"queryText\":\"// The sample to be displayed\\r\\nlet current_snapshot = materialize(\\r\\nsqldb_elastic_pool_active_sessions\\r\\n| where sample_time_utc == todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where logical_server_name =~ @\\\"{serverName}\\\"\\r\\n| where elastic_pool_name == @\\\"{elasticPoolName}\\\"\\r\\n{databaseNameFilter}\\r\\n| where ({haReplica} and replica_type == \\\"HA secondary\\\") or (not ({haReplica}) and replica_type != \\\"HA secondary\\\")\\r\\n| project-away logical_server_name, elastic_pool_name, anchor_database_name, anchor_database_id, anchor_logical_database_id, anchor_physical_database_id, replica_type\\r\\n| extend blocking_session_id = iif(blocking_session_id == 0, int(null), blocking_session_id)\\r\\n);\\r\\n// Blocking-related columns\\r\\nlet b = materialize(\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, blocking_session_id\\r\\n);\\r\\n// Assumption: blocking chains are at most 9 levels deep. 
If proven incorrect, introduce additional lookups\\r\\nlet head_blockers = materialize(\\r\\nb\\r\\n| lookup (b | project session_id, blocking_session_id2 = blocking_session_id) on $left.blocking_session_id == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id3 = blocking_session_id) on $left.blocking_session_id2 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id4 = blocking_session_id) on $left.blocking_session_id3 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id5 = blocking_session_id) on $left.blocking_session_id4 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id6 = blocking_session_id) on $left.blocking_session_id5 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id7 = blocking_session_id) on $left.blocking_session_id6 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id8 = blocking_session_id) on $left.blocking_session_id7 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id9 = blocking_session_id) on $left.blocking_session_id8 == $right.session_id\\r\\n| project session_id,\\r\\n request_id,\\r\\n head_blocking_session_id = coalesce(\\r\\n blocking_session_id9,\\r\\n blocking_session_id8,\\r\\n blocking_session_id7,\\r\\n blocking_session_id6,\\r\\n blocking_session_id5,\\r\\n blocking_session_id4,\\r\\n blocking_session_id3,\\r\\n blocking_session_id2,\\r\\n blocking_session_id\\r\\n )\\r\\n| where isnotempty(head_blocking_session_id)\\r\\n);\\r\\n// column labels for each grid\\r\\nlet column_labels_string = dynamic(\\r\\n[\\r\\n\\\"session_status\\\",\\\"workload_group_name\\\",\\\"connection_id\\\",\\\"encrypt_option\\\",\\\"wait_resource\\\",\\\"transaction_isolation_level\\\",\\\"query_hash\\\",\\\"query_plan_hash\\\",\\\"session_cpu_time\\\",\\\"session_duration\\\",\\\"request_cpu_time\\\",\\\"sql_module_name\\\"\\r\\n]);\\r\\nlet column_labels_number_count = dynamic(\\r\\n[\\r\\n\\\"session_logical_reads\\\",\\\"session_writes\\\",\\\"session_row_count\\\",\\\"request_row_count\\\",\\\"dop\\\",\\\"parallel_worker_count\\\", \\\"request_writes\\\"\\r\\n]);\\r\\nlet column_labels_number_id = dynamic(\\r\\n[\\r\\n\\\"database_id\\\",\\\"workload_group_id\\\",\\\"request_id\\\",\\\"sql_module_object_id\\\",\\\"blocking_session_id\\\"\\r\\n]);\\r\\nlet column_labels_number_kb = dynamic(\\r\\n[\\r\\n\\\"granted_memory\\\",\\\"requested_memory\\\",\\\"max_used_memory\\\",\\\"ideal_memory\\\"\\r\\n]);\\r\\nlet column_labels_datetime = dynamic(\\r\\n[\\r\\n\\\"login_time\\\",\\\"request_start_time\\\"\\r\\n]);\\r\\nlet column_labels_task_counts = dynamic(\\r\\n[\\r\\n\\\"pending\\\", \\\"runnable\\\", \\\"running\\\", \\\"suspended\\\", \\\"done\\\", \\\"spinloop\\\"\\r\\n]);\\r\\n// result set for strings grid\\r\\nlet session_details_string = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_status, workload_group_name, connection_id, encrypt_option, wait_resource, transaction_isolation_level, query_hash, query_plan_hash, session_cpu_time = format_timespan(session_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), session_duration = format_timespan(session_total_elapsed_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), request_cpu_time = format_timespan(request_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), sql_module_name\\r\\n| project session_id, request_id, columns = pack_all() // put all columns into a property bag\\r\\n| mv-apply column_label = column_labels_string to 
typeof(string) on (project column_label, column_value = tostring(columns[column_label])) // create a row for each name/value pair\\r\\n| project-away columns\\r\\n| sort by column_label asc //make_list later will preserve sort order\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value) // introduce \\\"Property\\\" and \\\"Value\\\" column names\\r\\n| summarize details_string = dynamic_to_json(make_list(c)) by session_id, request_id // for each session_id and request_id, aggregate rowset into a json array of rows, to be used as grid source\\r\\n);\\r\\n// result set for count numbers grid\\r\\nlet session_details_number_count = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_logical_reads, session_writes, session_row_count, request_row_count, dop, parallel_worker_count, request_writes\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_count to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_count = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for id numbers grid\\r\\nlet session_details_number_id = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, database_id, workload_group_id, sql_module_object_id, blocking_session_id\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_id to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_id = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for byte numbers grid\\r\\nlet session_details_number_kb = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, granted_memory = memory_grant_kb, requested_memory = requested_memory_kb, max_used_memory = max_used_memory_kb, ideal_memory = ideal_memory_kb\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_kb to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_kb = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for datetime grid\\r\\nlet session_details_datetime = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, login_time, request_start_time\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_datetime to typeof(string) on (project column_label, column_value = todatetime(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_datetime = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\nlet session_details_task_counts = ( \\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, 
pending = pending_task_count, runnable = runnable_task_count, running = running_task_count, suspended = suspended_task_count, done = done_task_count, spinloop = spinloop_task_count\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_task_counts to typeof(string) on (project column_label, column_value = toint(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Task type\\\", column_label, \\\"Count\\\", column_value)\\r\\n| summarize task_counts = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// return last snapshot with json arrays for each detail grid\\r\\ncurrent_snapshot\\r\\n| lookup head_blockers on $left.session_id == $right.session_id and $left.request_id == $right.request_id\\r\\n| join session_details_string on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_count on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_id on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_kb on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_datetime on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_task_counts on session_id and $left.request_id == $right.request_id\\r\\n| join kind=leftouter (\\r\\n head_blockers\\r\\n | summarize count_blocked_sessions = count() by head_blocking_session_id\\r\\n ) on $left.session_id == $right.head_blocking_session_id\\r\\n| project request_duration = format_timespan(1ms * request_total_elapsed_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n session_id,\\r\\n database = iif(isnotempty(database_name), database_name, tostring(database_id)),\\r\\n request_status,\\r\\n command,\\r\\n input_buffer_text,\\r\\n blocked_by = head_blocking_session_id,\\r\\n count_blocked_sessions,\\r\\n request_normalized_cpu_time = toreal(request_cpu_time_ms) / toreal(request_total_elapsed_time_ms),\\r\\n request_normalized_logical_reads = toreal(request_logical_reads) / toreal(request_total_elapsed_time_ms),\\r\\n dop,\\r\\n wait_type,\\r\\n wait_time = format_timespan(1ms * iif(wait_time_ms != 0, wait_time_ms, int(null)), \\\"d.hh:mm:ss.fff\\\"),\\r\\n open_transaction_count,\\r\\n request_id,\\r\\n percent_complete = iif(percent_complete == 0, real(null), percent_complete),\\r\\n tempdb_allocations_kb,\\r\\n tempdb_current_kb,\\r\\n sql_module_name,\\r\\n login_name,\\r\\n host_name,\\r\\n program_name,\\r\\n client_net_address,\\r\\n request_cpu_time = format_timespan(1ms * request_cpu_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n request_total_elapsed_time_ms,\\r\\n request_logical_reads,\\r\\n statement_text_json = dynamic_to_json(pack(\\\"statement_text\\\", statement_text)),\\r\\n input_buffer_text_json = dynamic_to_json(pack(\\\"input_buffer_text\\\", input_buffer_text)),\\r\\n session_wait_stats = iif(isempty(session_wait_stats), '[{\\\"wait_type\\\":\\\"\\\",\\\"wait_time_ms\\\":\\\"\\\",\\\"signal_wait_time_ms\\\":\\\"\\\",\\\"max_wait_time_ms\\\":\\\"\\\",\\\"waiting_tasks_count\\\":\\\"\\\"}]', session_wait_stats),\\r\\n details_datetime,\\r\\n details_number_id,\\r\\n details_number_kb,\\r\\n details_string,\\r\\n details_number_count,\\r\\n task_counts\\r\\n| extend request_normalized_cpu_time = iif(isfinite(request_normalized_cpu_time), request_normalized_cpu_time, real(null)),\\r\\n request_normalized_logical_reads = 
iif(isfinite(request_normalized_logical_reads), request_normalized_logical_reads, real(null))\\r\\n| where \\\"{blockedFilter}\\\" == \\\"all\\\"\\r\\n or\\r\\n (\\\"{blockedFilter}\\\" == \\\"blocked\\\" and (isnotnull(blocked_by) or isnotnull(count_blocked_sessions)))\\r\\n| sort by count_blocked_sessions desc nulls last, request_total_elapsed_time_ms desc nulls last // blockers first, followed by sessions with longest running requests, then longest running sessions\\r\\n\",\"clusterName\":\"{adxClusterUri}\",\"databaseName\":\"{adxDatabase}\"}", + "query": "{\"version\":\"AzureDataExplorerQuery/1.0\",\"queryText\":\"// The sample to be displayed\\r\\nlet current_snapshot = materialize(\\r\\nsqldb_elastic_pool_active_sessions\\r\\n| where sample_time_utc == todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where logical_server_name =~ @\\\"{serverName}\\\"\\r\\n| where elastic_pool_name == @\\\"{elasticPoolName}\\\"\\r\\n{databaseNameFilter}\\r\\n| where ({haReplica} and replica_type == \\\"HA secondary\\\") or (not ({haReplica}) and replica_type != \\\"HA secondary\\\")\\r\\n| project-away logical_server_name, elastic_pool_name, anchor_database_name, anchor_database_id, anchor_logical_database_id, anchor_physical_database_id, replica_type\\r\\n| extend blocking_session_id = iif(blocking_session_id == 0, int(null), blocking_session_id)\\r\\n);\\r\\n// Blocking-related columns\\r\\nlet b = materialize(\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, blocking_session_id\\r\\n);\\r\\n// Assumption: blocking chains are at most 9 levels deep. If proven incorrect, introduce additional lookups\\r\\nlet head_blockers = materialize(\\r\\nb\\r\\n| lookup (b | project session_id, blocking_session_id2 = blocking_session_id) on $left.blocking_session_id == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id3 = blocking_session_id) on $left.blocking_session_id2 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id4 = blocking_session_id) on $left.blocking_session_id3 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id5 = blocking_session_id) on $left.blocking_session_id4 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id6 = blocking_session_id) on $left.blocking_session_id5 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id7 = blocking_session_id) on $left.blocking_session_id6 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id8 = blocking_session_id) on $left.blocking_session_id7 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id9 = blocking_session_id) on $left.blocking_session_id8 == $right.session_id\\r\\n| project session_id,\\r\\n request_id,\\r\\n head_blocking_session_id = coalesce(\\r\\n blocking_session_id9,\\r\\n blocking_session_id8,\\r\\n blocking_session_id7,\\r\\n blocking_session_id6,\\r\\n blocking_session_id5,\\r\\n blocking_session_id4,\\r\\n blocking_session_id3,\\r\\n blocking_session_id2,\\r\\n blocking_session_id\\r\\n )\\r\\n| where isnotempty(head_blocking_session_id)\\r\\n);\\r\\n// column labels for each grid\\r\\nlet column_labels_string = dynamic(\\r\\n[\\r\\n\\\"session_status\\\",\\\"workload_group_name\\\",\\\"connection_id\\\",\\\"encrypt_option\\\",\\\"wait_resource\\\",\\\"transaction_isolation_level\\\",\\\"query_hash\\\",\\\"query_plan_hash\\\",\\\"session_cpu_time\\\",\\\"session_duration\\\",\\\"request_cpu_time\\\",\\\"sql_module_name\\\"\\r\\n]);\\r\\nlet 
column_labels_number_count = dynamic(\\r\\n[\\r\\n\\\"session_logical_reads\\\",\\\"session_writes\\\",\\\"session_row_count\\\",\\\"request_row_count\\\",\\\"dop\\\",\\\"parallel_worker_count\\\", \\\"request_writes\\\"\\r\\n]);\\r\\nlet column_labels_number_id = dynamic(\\r\\n[\\r\\n\\\"database_id\\\",\\\"workload_group_id\\\",\\\"request_id\\\",\\\"sql_module_object_id\\\",\\\"blocking_session_id\\\"\\r\\n]);\\r\\nlet column_labels_number_kb = dynamic(\\r\\n[\\r\\n\\\"granted_memory\\\",\\\"requested_memory\\\",\\\"max_used_memory\\\",\\\"ideal_memory\\\"\\r\\n]);\\r\\nlet column_labels_datetime = dynamic(\\r\\n[\\r\\n\\\"login_time\\\",\\\"request_start_time\\\"\\r\\n]);\\r\\nlet column_labels_task_counts = dynamic(\\r\\n[\\r\\n\\\"pending\\\", \\\"runnable\\\", \\\"running\\\", \\\"suspended\\\", \\\"done\\\", \\\"spinloop\\\"\\r\\n]);\\r\\n// result set for strings grid\\r\\nlet session_details_string = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_status, workload_group_name, connection_id, encrypt_option, wait_resource, transaction_isolation_level, query_hash, query_plan_hash, session_cpu_time = format_timespan(session_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), session_duration = format_timespan(session_total_elapsed_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), request_cpu_time = format_timespan(request_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), sql_module_name\\r\\n| project session_id, request_id, columns = pack_all() // put all columns into a property bag\\r\\n| mv-apply column_label = column_labels_string to typeof(string) on (project column_label, column_value = tostring(columns[column_label])) // create a row for each name/value pair\\r\\n| project-away columns\\r\\n| sort by column_label asc //make_list later will preserve sort order\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value) // introduce \\\"Property\\\" and \\\"Value\\\" column names\\r\\n| summarize details_string = dynamic_to_json(make_list(c)) by session_id, request_id // for each session_id and request_id, aggregate rowset into a json array of rows, to be used as grid source\\r\\n);\\r\\n// result set for count numbers grid\\r\\nlet session_details_number_count = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_logical_reads, session_writes, session_row_count, request_row_count, dop, parallel_worker_count, request_writes\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_count to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_count = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for id numbers grid\\r\\nlet session_details_number_id = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, database_id, workload_group_id, sql_module_object_id, blocking_session_id\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_id to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_id = 
dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for byte numbers grid\\r\\nlet session_details_number_kb = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, granted_memory = memory_grant_kb, requested_memory = requested_memory_kb, max_used_memory = max_used_memory_kb, ideal_memory = ideal_memory_kb\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_kb to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_kb = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for datetime grid\\r\\nlet session_details_datetime = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, login_time, request_start_time\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_datetime to typeof(string) on (project column_label, column_value = todatetime(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_datetime = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\nlet session_details_task_counts = ( \\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, pending = pending_task_count, runnable = runnable_task_count, running = running_task_count, suspended = suspended_task_count, done = done_task_count, spinloop = spinloop_task_count\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_task_counts to typeof(string) on (project column_label, column_value = toint(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Task type\\\", column_label, \\\"Count\\\", column_value)\\r\\n| summarize task_counts = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// return last snapshot with json arrays for each detail grid\\r\\ncurrent_snapshot\\r\\n| lookup head_blockers on $left.session_id == $right.session_id and $left.request_id == $right.request_id\\r\\n| join session_details_string on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_count on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_id on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_kb on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_datetime on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_task_counts on session_id and $left.request_id == $right.request_id\\r\\n| join kind=leftouter (\\r\\n head_blockers\\r\\n | summarize count_blocked_sessions = count() by head_blocking_session_id\\r\\n ) on $left.session_id == $right.head_blocking_session_id\\r\\n| project request_duration = format_timespan(1ms * request_total_elapsed_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n session_id,\\r\\n database = iif(isnotempty(database_name), database_name, tostring(database_id)),\\r\\n request_status,\\r\\n command,\\r\\n input_buffer_text,\\r\\n blocked_by = head_blocking_session_id,\\r\\n count_blocked_sessions,\\r\\n 
request_normalized_cpu_time = toreal(request_cpu_time_ms) / toreal(request_total_elapsed_time_ms),\\r\\n request_normalized_logical_reads = toreal(request_logical_reads) / toreal(request_total_elapsed_time_ms),\\r\\n dop,\\r\\n wait_type,\\r\\n wait_time = format_timespan(1ms * iif(wait_time_ms != 0, wait_time_ms, int(null)), \\\"d.hh:mm:ss.fff\\\"),\\r\\n open_transaction_count,\\r\\n request_id,\\r\\n percent_complete = iif(percent_complete == 0, real(null), percent_complete),\\r\\n tempdb_allocations_kb,\\r\\n tempdb_current_kb,\\r\\n sql_module_name,\\r\\n login_name,\\r\\n host_name,\\r\\n program_name,\\r\\n client_net_address,\\r\\n request_cpu_time = format_timespan(1ms * request_cpu_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n request_total_elapsed_time_ms,\\r\\n request_logical_reads,\\r\\n statement_text_json = dynamic_to_json(pack(\\\"statement_text\\\", statement_text)),\\r\\n input_buffer_text_json = dynamic_to_json(pack(\\\"input_buffer_text\\\", input_buffer_text)),\\r\\n session_wait_stats = iif(isempty(session_wait_stats), '[{\\\"wait_type\\\":\\\"\\\",\\\"wait_time_ms\\\":\\\"\\\",\\\"signal_wait_time_ms\\\":\\\"\\\",\\\"max_wait_time_ms\\\":\\\"\\\",\\\"waiting_tasks_count\\\":\\\"\\\"}]', session_wait_stats),\\r\\n details_datetime,\\r\\n details_number_id,\\r\\n details_number_kb,\\r\\n details_string,\\r\\n details_number_count,\\r\\n task_counts,\\r\\n blocked_tooltip_text = iif(isnull(head_blocking_session_id), \\\"\\\", strcat(\\\"Session \\\", tostring(session_id), \\\" is blocked. The head blocker of the blocking chain is session \\\", tostring(head_blocking_session_id), \\\".\\\")),\\r\\n blocker_tooltip_text = iif(count_blocked_sessions > 0, strcat(\\\"Session \\\", session_id, \\\" is a head blocker in a blocking chain. It blocks \\\", count_blocked_sessions, \\\" other session(s).\\\"), \\\"\\\")\\r\\n| extend request_normalized_cpu_time = iif(isfinite(request_normalized_cpu_time), request_normalized_cpu_time, real(null)),\\r\\n request_normalized_logical_reads = iif(isfinite(request_normalized_logical_reads), request_normalized_logical_reads, real(null))\\r\\n| where \\\"{blockedFilter}\\\" == \\\"all\\\"\\r\\n or\\r\\n (\\\"{blockedFilter}\\\" == \\\"blocked\\\" and (isnotnull(blocked_by) or isnotnull(count_blocked_sessions)))\\r\\n| sort by count_blocked_sessions desc nulls last, request_total_elapsed_time_ms desc nulls last // blockers first, followed by sessions with longest running requests, then longest running sessions\\r\\n\",\"clusterName\":\"{adxClusterUri}\",\"databaseName\":\"{adxDatabase}\"}", "size": 0, "showAnalytics": true, "title": "Active sessions", @@ -550,7 +550,7 @@ "customColumnWidthSetting": "15ch" }, "tooltipFormat": { - "tooltip": "Session [\"session_id\"] is blocked. The head blocker of the blocking chain is session [\"blocked_by\"]." + "tooltip": "[\"blocked_tooltip_text\"]" } }, { @@ -575,7 +575,7 @@ "customColumnWidthSetting": "12ch" }, "tooltipFormat": { - "tooltip": "Session [\"session_id\"] is a head blocker in a blocking chain. It blocks [\"count_blocked_sessions\"] other session(s)." 
+ "tooltip": "[\"blocker_tooltip_text\"]" } }, { @@ -745,6 +745,14 @@ { "columnMatch": "task_counts", "formatter": 5 + }, + { + "columnMatch": "blocked_tooltip_text", + "formatter": 5 + }, + { + "columnMatch": "blocker_tooltip_text", + "formatter": 5 } ], "filter": true, diff --git a/Workbooks/Database watcher/Azure SQL Managed Instance/instance/activity/activity.workbook b/Workbooks/Database watcher/Azure SQL Managed Instance/instance/activity/activity.workbook index cb1300322f..9b6bd4971a 100644 --- a/Workbooks/Database watcher/Azure SQL Managed Instance/instance/activity/activity.workbook +++ b/Workbooks/Database watcher/Azure SQL Managed Instance/instance/activity/activity.workbook @@ -313,7 +313,7 @@ "type": 3, "content": { "version": "KqlItem/1.0", - "query": "{\"version\":\"AzureDataExplorerQuery/1.0\",\"queryText\":\"let max_missing_indexes_per_query_hash = 30;\\r\\n// The missing indexes table may not exist. Work around by using a same-schema stub in a fuzzy union.\\r\\nlet missing_indexes_stub = view() {\\r\\nprint query_hash = \\\"\\\", missing_index_label = \\\"\\\", create_index_statements = \\\"\\\"\\r\\n| where isnotempty(query_hash)\\r\\n};\\r\\nlet missing_indexes_table = view() {\\r\\nsqlmi_missing_indexes\\r\\n| where sample_time_utc between ({timeRange:start} .. {timeRange:end})\\r\\n| where sample_time_utc <= todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where managed_instance_name =~ @\\\"{managedInstanceName}\\\"\\r\\n| where ({haReplica} and replica_type == \\\"HA secondary\\\") or (not ({haReplica}) and replica_type != \\\"HA secondary\\\")\\r\\n| where grain_type == \\\"query_plan\\\"\\r\\n// Use the last missing index sample within the relevant interval\\r\\n| summarize arg_max(sample_time_utc, schema_name, object_name, database_engine_start_time_utc, avg_user_impact, equality_columns, inequality_columns, included_columns, user_seeks, user_scans, last_user_seek_utc, last_user_scan_utc)\\r\\n by managed_instance_name, database_name, query_hash, query_plan_hash\\r\\n// Remove dups that may be present in different plans\\r\\n| summarize sample_time_utc = min(sample_time_utc),\\r\\n database_engine_start_time_utc = min(database_engine_start_time_utc),\\r\\n avg_user_impact = max(avg_user_impact),\\r\\n user_seeks = sum(user_seeks),\\r\\n user_scans = sum(user_scans),\\r\\n last_user_seek_utc = max(last_user_seek_utc),\\r\\n last_user_scan_utc = max(last_user_scan_utc),\\r\\n query_plan_hash = take_any(query_plan_hash),\\r\\n count_plans = dcount(query_plan_hash)\\r\\n by managed_instance_name, database_name, query_hash, schema_name, object_name, equality_columns, inequality_columns, included_columns\\r\\n| extend duration_since_startup_seconds = 1s * datetime_diff(\\\"second\\\", sample_time_utc, database_engine_start_time_utc),\\r\\n query_plan_hash = iif(count_plans == 1, tostring(query_plan_hash), strcat(query_plan_hash, \\\" (+\\\", tostring(count_plans - 1), \\\")\\\"))\\r\\n| where not(schema_name has_any(\\\"[\\\",\\\"]\\\")) and not(object_name has_any(\\\"[\\\",\\\"]\\\"))\\r\\n| extend create_index_statement = strcat(\\r\\n\\\"/* \\\",\\r\\n\\\"\\\\r\\\\nManaged instance: \\\", managed_instance_name,\\r\\n\\\"\\\\r\\\\nDatabase: \\\", database_name,\\r\\n\\\"\\\\r\\\\nQuery hash: \\\", query_hash,\\r\\n\\\"\\\\r\\\\nQuery plan hash: \\\", query_plan_hash,\\r\\n\\\"\\\\r\\\\nAverage impact (%): \\\", avg_user_impact,\\r\\n\\\"\\\\r\\\\nSeeks: \\\", user_seeks,\\r\\n\\\"\\\\r\\\\nScans: \\\", user_scans,\\r\\n\\\"\\\\r\\\\nLast seek time (UTC): \\\", 
iif(isnotempty(last_user_seek_utc), format_datetime(last_user_seek_utc, \\\"yyyy-MM-dd HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nLast scan time (UTC): \\\", iif(isnotempty(last_user_scan_utc), format_datetime(last_user_scan_utc, \\\"yyyy-MM-dd HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nInterval duration: \\\", strcat(format_timespan(duration_since_startup_seconds, \\\"d\\\"), \\\" days, \\\", format_timespan(duration_since_startup_seconds, \\\"hh\\\"), \\\" hours, \\\", format_timespan(duration_since_startup_seconds, \\\"m\\\"), \\\" minutes\\\"),\\r\\n\\\"\\\\r\\\\n*/\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX [replace-with-index-name] ON [\\\", schema_name, \\\"].[\\\", object_name, \\\"]\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\niif(isnotempty(equality_columns), strcat(\\\"\\\\r\\\\n\\\", equality_columns, iif(isnotempty(inequality_columns), \\\",\\\", \\\"\\\"), \\\" /* equality columns */\\\"), \\\"\\\"),\\r\\niif(isnotempty(inequality_columns), strcat(\\\"\\\\r\\\\n\\\", inequality_columns, \\\" /* inequality columns */\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\n)\\\",\\r\\niif(isnotempty(included_columns), strcat(\\\"\\\\r\\\\nINCLUDE (\\\", included_columns, \\\")\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\nWITH /* Adjust index options as needed */\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\n\\\"\\\\r\\\\nMAXDOP = 8,\\\",\\r\\n\\\"\\\\r\\\\nONLINE = ON (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 5 MINUTES, ABORT_AFTER_WAIT = SELF)),\\\",\\r\\n\\\"\\\\r\\\\n-- RESUMABLE = ON,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = ROW,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = PAGE,\\\",\\r\\n\\\"\\\\r\\\\nSORT_IN_TEMPDB = ON\\\",\\r\\n\\\"\\\\r\\\\n);\\\",\\r\\n\\\"\\\\r\\\\n\\\"\\r\\n)\\r\\n| summarize count_missing_indexes = min_of(count(), max_missing_indexes_per_query_hash),\\r\\n statements = make_list(create_index_statement, max_missing_indexes_per_query_hash) by query_hash\\r\\n| project query_hash,\\r\\n count_missing_indexes,\\r\\n create_index_statements = strcat(\\r\\n\\\"/*\\\",\\r\\n\\\"\\\\r\\\\nIMPORTANT:\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX statements in this script are tentative suggestions.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nBefore creating indexes based on these suggestions:\\\",\\r\\n\\\"\\\\r\\\\n1. Examine existing indexes. If possible, modify them instead of creating a new index.\\\",\\r\\n\\\"\\\\r\\\\n2. When there are multiple similar index suggestions, consider combining them into one index.\\\",\\r\\n\\\"\\\\r\\\\n3. Always test and validate that a new or modified index does improve performance.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nAvoid creating many little-used indexes. 
That slows down inserts, updates,\\\",\\r\\n\\\"\\\\r\\\\nand deletes substantially, and increases storage and memory consumption.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nFor more information, see https://go.microsoft.com/fwlink/?linkid=2245704.\\\",\\r\\n\\\"\\\\r\\\\n*/\\\\r\\\\n\\\\r\\\\n\\\",\\r\\nstrcat_array(statements, \\\"\\\\r\\\\n\\\")\\r\\n)\\r\\n};\\r\\nlet missing_indexes = (\\r\\nunion isfuzzy=true\\r\\n(missing_indexes_stub),\\r\\n(missing_indexes_table)\\r\\n);\\r\\n// The sample to be displayed\\r\\nlet current_snapshot = materialize(\\r\\nsqlmi_active_sessions\\r\\n| where sample_time_utc == todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where managed_instance_name =~ @\\\"{managedInstanceName}\\\"\\r\\n| where ({haReplica} and replica_type == \\\"HA secondary\\\") or (not ({haReplica}) and replica_type != \\\"HA secondary\\\")\\r\\n| project-away managed_instance_name\\r\\n| extend blocking_session_id = iif(\\r\\n blocking_session_id in (0, -4, -5), // Do not consider long latch waits when displaying blocking chains\\r\\n int(null),\\r\\n blocking_session_id\\r\\n )\\r\\n);\\r\\n// Blocking-related columns\\r\\nlet b = materialize(\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, blocking_session_id\\r\\n);\\r\\n// Assumption: blocking chains are at most 9 levels deep. If proven incorrect, introduce additional lookups\\r\\nlet head_blockers = materialize(\\r\\nb\\r\\n| lookup (b | project session_id, blocking_session_id2 = blocking_session_id) on $left.blocking_session_id == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id3 = blocking_session_id) on $left.blocking_session_id2 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id4 = blocking_session_id) on $left.blocking_session_id3 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id5 = blocking_session_id) on $left.blocking_session_id4 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id6 = blocking_session_id) on $left.blocking_session_id5 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id7 = blocking_session_id) on $left.blocking_session_id6 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id8 = blocking_session_id) on $left.blocking_session_id7 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id9 = blocking_session_id) on $left.blocking_session_id8 == $right.session_id\\r\\n| project session_id,\\r\\n request_id,\\r\\n head_blocking_session_id = coalesce(\\r\\n blocking_session_id9,\\r\\n blocking_session_id8,\\r\\n blocking_session_id7,\\r\\n blocking_session_id6,\\r\\n blocking_session_id5,\\r\\n blocking_session_id4,\\r\\n blocking_session_id3,\\r\\n blocking_session_id2,\\r\\n blocking_session_id\\r\\n )\\r\\n| where isnotempty(head_blocking_session_id)\\r\\n);\\r\\n// column labels for each grid\\r\\nlet column_labels_string = dynamic(\\r\\n[\\r\\n\\\"session_status\\\",\\\"workload_group_name\\\",\\\"connection_id\\\",\\\"encrypt_option\\\",\\\"wait_resource\\\",\\\"transaction_isolation_level\\\",\\\"query_hash\\\",\\\"query_plan_hash\\\",\\\"session_cpu_time\\\",\\\"session_duration\\\",\\\"request_cpu_time\\\",\\\"sql_module_name\\\"\\r\\n]);\\r\\nlet column_labels_number_count = dynamic(\\r\\n[\\r\\n\\\"session_logical_reads\\\",\\\"session_writes\\\",\\\"session_row_count\\\",\\\"request_row_count\\\",\\\"dop\\\",\\\"parallel_worker_count\\\", \\\"request_writes\\\"\\r\\n]);\\r\\nlet column_labels_number_id = 
dynamic(\\r\\n[\\r\\n\\\"database_id\\\",\\\"workload_group_id\\\",\\\"request_id\\\",\\\"sql_module_object_id\\\",\\\"blocking_session_id\\\"\\r\\n]);\\r\\nlet column_labels_number_kb = dynamic(\\r\\n[\\r\\n\\\"granted_memory\\\",\\\"requested_memory\\\",\\\"max_used_memory\\\",\\\"ideal_memory\\\"\\r\\n]);\\r\\nlet column_labels_datetime = dynamic(\\r\\n[\\r\\n\\\"login_time\\\",\\\"request_start_time\\\"\\r\\n]);\\r\\nlet column_labels_task_counts = dynamic(\\r\\n[\\r\\n\\\"pending\\\", \\\"runnable\\\", \\\"running\\\", \\\"suspended\\\", \\\"done\\\", \\\"spinloop\\\"\\r\\n]);\\r\\n// result set for strings grid\\r\\nlet session_details_string = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_status, workload_group_name, connection_id, encrypt_option, wait_resource, transaction_isolation_level, query_hash, query_plan_hash, session_cpu_time = format_timespan(session_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), request_cpu_time = format_timespan(request_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), session_duration = format_timespan(session_total_elapsed_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), sql_module_name\\r\\n| project session_id, request_id, columns = pack_all() // put all columns into a property bag\\r\\n| mv-apply column_label = column_labels_string to typeof(string) on (project column_label, column_value = tostring(columns[column_label])) // create a row for each name/value pair\\r\\n| project-away columns\\r\\n| sort by column_label asc //make_list later will preserve sort order\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value) // introduce \\\"Property\\\" and \\\"Value\\\" column names\\r\\n| summarize details_string = dynamic_to_json(make_list(c)) by session_id, request_id // for each session_id and request_id, aggregate rowset into a json array of rows, to be used as grid source\\r\\n);\\r\\n// result set for count numbers grid\\r\\nlet session_details_number_count = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_logical_reads, session_writes, session_row_count, request_row_count, dop, parallel_worker_count, request_writes\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_count to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_count = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for id numbers grid\\r\\nlet session_details_number_id = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, database_id, workload_group_id, sql_module_object_id, blocking_session_id\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_id to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_id = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for byte numbers grid\\r\\nlet session_details_number_kb = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, granted_memory = memory_grant_kb, requested_memory = 
requested_memory_kb, max_used_memory = max_used_memory_kb, ideal_memory = ideal_memory_kb\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_kb to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_kb = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for datetime grid\\r\\nlet session_details_datetime = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, login_time, request_start_time\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_datetime to typeof(string) on (project column_label, column_value = todatetime(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_datetime = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\nlet session_details_task_counts = ( \\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, pending = pending_task_count, runnable = runnable_task_count, running = running_task_count, suspended = suspended_task_count, done = done_task_count, spinloop = spinloop_task_count\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_task_counts to typeof(string) on (project column_label, column_value = toint(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Task type\\\", column_label, \\\"Count\\\", column_value)\\r\\n| summarize task_counts = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// return last snapshot with json arrays for each detail grid\\r\\ncurrent_snapshot\\r\\n| lookup head_blockers on $left.session_id == $right.session_id and $left.request_id == $right.request_id\\r\\n| join session_details_string on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_count on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_id on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_kb on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_datetime on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_task_counts on session_id and $left.request_id == $right.request_id\\r\\n| join kind=leftouter (\\r\\n head_blockers\\r\\n | summarize count_blocked_sessions = count() by head_blocking_session_id\\r\\n ) on $left.session_id == $right.head_blocking_session_id\\r\\n| lookup missing_indexes on query_hash\\r\\n| project request_duration = format_timespan(1ms * request_total_elapsed_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n session_id,\\r\\n database_name,\\r\\n request_status,\\r\\n command,\\r\\n input_buffer_text,\\r\\n blocked_by = head_blocking_session_id,\\r\\n count_blocked_sessions,\\r\\n count_missing_indexes,\\r\\n request_normalized_cpu_time = toreal(request_cpu_time_ms) / toreal(request_total_elapsed_time_ms),\\r\\n request_normalized_logical_reads = toreal(request_logical_reads) / toreal(request_total_elapsed_time_ms),\\r\\n dop,\\r\\n wait_type,\\r\\n wait_time = 
format_timespan(1ms * iif(wait_time_ms != 0, wait_time_ms, int(null)), \\\"d.hh:mm:ss.fff\\\"),\\r\\n open_transaction_count,\\r\\n request_id,\\r\\n percent_complete = iif(percent_complete == 0, real(null), percent_complete),\\r\\n tempdb_allocations_kb,\\r\\n tempdb_current_kb,\\r\\n sql_module_name,\\r\\n login_name,\\r\\n host_name,\\r\\n program_name,\\r\\n client_net_address,\\r\\n request_cpu_time = format_timespan(1ms * request_cpu_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n request_total_elapsed_time_ms,\\r\\n request_logical_reads,\\r\\n statement_text_json = dynamic_to_json(pack(\\\"statement_text\\\", statement_text)),\\r\\n input_buffer_text_json = dynamic_to_json(pack(\\\"input_buffer_text\\\", input_buffer_text)),\\r\\n session_wait_stats = iif(isempty(session_wait_stats), '[{\\\"wait_type\\\":\\\"\\\",\\\"wait_time_ms\\\":\\\"\\\",\\\"signal_wait_time_ms\\\":\\\"\\\",\\\"max_wait_time_ms\\\":\\\"\\\",\\\"waiting_tasks_count\\\":\\\"\\\"}]', session_wait_stats),\\r\\n details_datetime,\\r\\n details_number_id,\\r\\n details_number_kb,\\r\\n details_string,\\r\\n details_number_count,\\r\\n task_counts,\\r\\n create_index_statements\\r\\n| extend request_normalized_cpu_time = iif(isfinite(request_normalized_cpu_time), request_normalized_cpu_time, real(null)),\\r\\n request_normalized_logical_reads = iif(isfinite(request_normalized_logical_reads), request_normalized_logical_reads, real(null))\\r\\n| where \\\"{blockedFilter}\\\" == \\\"all\\\"\\r\\n or\\r\\n (\\\"{blockedFilter}\\\" == \\\"blocked\\\" and (isnotnull(blocked_by) or isnotnull(count_blocked_sessions)))\\r\\n| sort by count_blocked_sessions desc nulls last, request_total_elapsed_time_ms desc nulls last // blockers first, followed by sessions with longest running requests, then longest running sessions\\r\\n\",\"clusterName\":\"{adxClusterUri}\",\"databaseName\":\"{adxDatabase}\"}", + "query": "{\"version\":\"AzureDataExplorerQuery/1.0\",\"queryText\":\"let max_missing_indexes_per_query_hash = 30;\\r\\n// The missing indexes table may not exist. Work around by using a same-schema stub in a fuzzy union.\\r\\nlet missing_indexes_stub = view() {\\r\\nprint query_hash = \\\"\\\", missing_index_label = \\\"\\\", create_index_statements = \\\"\\\"\\r\\n| where isnotempty(query_hash)\\r\\n};\\r\\nlet missing_indexes_table = view() {\\r\\nsqlmi_missing_indexes\\r\\n| where sample_time_utc between ({timeRange:start} .. 
{timeRange:end})\\r\\n| where sample_time_utc <= todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where managed_instance_name =~ @\\\"{managedInstanceName}\\\"\\r\\n| where ({haReplica} and replica_type == \\\"HA secondary\\\") or (not ({haReplica}) and replica_type != \\\"HA secondary\\\")\\r\\n| where grain_type == \\\"query_plan\\\"\\r\\n// Use the last missing index sample within the relevant interval\\r\\n| summarize arg_max(sample_time_utc, schema_name, object_name, database_engine_start_time_utc, avg_user_impact, equality_columns, inequality_columns, included_columns, user_seeks, user_scans, last_user_seek_utc, last_user_scan_utc)\\r\\n by managed_instance_name, database_name, query_hash, query_plan_hash\\r\\n// Remove dups that may be present in different plans\\r\\n| summarize sample_time_utc = min(sample_time_utc),\\r\\n database_engine_start_time_utc = min(database_engine_start_time_utc),\\r\\n avg_user_impact = max(avg_user_impact),\\r\\n user_seeks = sum(user_seeks),\\r\\n user_scans = sum(user_scans),\\r\\n last_user_seek_utc = max(last_user_seek_utc),\\r\\n last_user_scan_utc = max(last_user_scan_utc),\\r\\n query_plan_hash = take_any(query_plan_hash),\\r\\n count_plans = dcount(query_plan_hash)\\r\\n by managed_instance_name, database_name, query_hash, schema_name, object_name, equality_columns, inequality_columns, included_columns\\r\\n| extend duration_since_startup_seconds = 1s * datetime_diff(\\\"second\\\", sample_time_utc, database_engine_start_time_utc),\\r\\n query_plan_hash = iif(count_plans == 1, tostring(query_plan_hash), strcat(query_plan_hash, \\\" (+\\\", tostring(count_plans - 1), \\\")\\\"))\\r\\n| where not(schema_name has_any(\\\"[\\\",\\\"]\\\")) and not(object_name has_any(\\\"[\\\",\\\"]\\\"))\\r\\n| extend create_index_statement = strcat(\\r\\n\\\"/* \\\",\\r\\n\\\"\\\\r\\\\nManaged instance: \\\", managed_instance_name,\\r\\n\\\"\\\\r\\\\nDatabase: \\\", database_name,\\r\\n\\\"\\\\r\\\\nQuery hash: \\\", query_hash,\\r\\n\\\"\\\\r\\\\nQuery plan hash: \\\", query_plan_hash,\\r\\n\\\"\\\\r\\\\nAverage impact (%): \\\", avg_user_impact,\\r\\n\\\"\\\\r\\\\nSeeks: \\\", user_seeks,\\r\\n\\\"\\\\r\\\\nScans: \\\", user_scans,\\r\\n\\\"\\\\r\\\\nLast seek time (UTC): \\\", iif(isnotempty(last_user_seek_utc), format_datetime(last_user_seek_utc, \\\"yyyy-MM-dd HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nLast scan time (UTC): \\\", iif(isnotempty(last_user_scan_utc), format_datetime(last_user_scan_utc, \\\"yyyy-MM-dd HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nInterval duration: \\\", strcat(format_timespan(duration_since_startup_seconds, \\\"d\\\"), \\\" days, \\\", format_timespan(duration_since_startup_seconds, \\\"hh\\\"), \\\" hours, \\\", format_timespan(duration_since_startup_seconds, \\\"m\\\"), \\\" minutes\\\"),\\r\\n\\\"\\\\r\\\\n*/\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX [replace-with-index-name] ON [\\\", schema_name, \\\"].[\\\", object_name, \\\"]\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\niif(isnotempty(equality_columns), strcat(\\\"\\\\r\\\\n\\\", equality_columns, iif(isnotempty(inequality_columns), \\\",\\\", \\\"\\\"), \\\" /* equality columns */\\\"), \\\"\\\"),\\r\\niif(isnotempty(inequality_columns), strcat(\\\"\\\\r\\\\n\\\", inequality_columns, \\\" /* inequality columns */\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\n)\\\",\\r\\niif(isnotempty(included_columns), strcat(\\\"\\\\r\\\\nINCLUDE (\\\", included_columns, \\\")\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\nWITH /* Adjust index options as needed 
*/\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\n\\\"\\\\r\\\\nMAXDOP = 8,\\\",\\r\\n\\\"\\\\r\\\\nONLINE = ON (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 5 MINUTES, ABORT_AFTER_WAIT = SELF)),\\\",\\r\\n\\\"\\\\r\\\\n-- RESUMABLE = ON,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = ROW,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = PAGE,\\\",\\r\\n\\\"\\\\r\\\\nSORT_IN_TEMPDB = ON\\\",\\r\\n\\\"\\\\r\\\\n);\\\",\\r\\n\\\"\\\\r\\\\n\\\"\\r\\n)\\r\\n| summarize count_missing_indexes = min_of(count(), max_missing_indexes_per_query_hash),\\r\\n statements = make_list(create_index_statement, max_missing_indexes_per_query_hash) by query_hash\\r\\n| project query_hash,\\r\\n count_missing_indexes,\\r\\n create_index_statements = strcat(\\r\\n\\\"/*\\\",\\r\\n\\\"\\\\r\\\\nIMPORTANT:\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX statements in this script are tentative suggestions.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nBefore creating indexes based on these suggestions:\\\",\\r\\n\\\"\\\\r\\\\n1. Examine existing indexes. If possible, modify them instead of creating a new index.\\\",\\r\\n\\\"\\\\r\\\\n2. When there are multiple similar index suggestions, consider combining them into one index.\\\",\\r\\n\\\"\\\\r\\\\n3. Always test and validate that a new or modified index does improve performance.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nAvoid creating many little-used indexes. That slows down inserts, updates,\\\",\\r\\n\\\"\\\\r\\\\nand deletes substantially, and increases storage and memory consumption.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nFor more information, see https://go.microsoft.com/fwlink/?linkid=2245704.\\\",\\r\\n\\\"\\\\r\\\\n*/\\\\r\\\\n\\\\r\\\\n\\\",\\r\\nstrcat_array(statements, \\\"\\\\r\\\\n\\\")\\r\\n)\\r\\n};\\r\\nlet missing_indexes = (\\r\\nunion isfuzzy=true\\r\\n(missing_indexes_stub),\\r\\n(missing_indexes_table)\\r\\n);\\r\\n// The sample to be displayed\\r\\nlet current_snapshot = materialize(\\r\\nsqlmi_active_sessions\\r\\n| where sample_time_utc == todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where managed_instance_name =~ @\\\"{managedInstanceName}\\\"\\r\\n| where ({haReplica} and replica_type == \\\"HA secondary\\\") or (not ({haReplica}) and replica_type != \\\"HA secondary\\\")\\r\\n| project-away managed_instance_name\\r\\n| extend blocking_session_id = iif(\\r\\n blocking_session_id in (0, -4, -5), // Do not consider long latch waits when displaying blocking chains\\r\\n int(null),\\r\\n blocking_session_id\\r\\n )\\r\\n);\\r\\n// Blocking-related columns\\r\\nlet b = materialize(\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, blocking_session_id\\r\\n);\\r\\n// Assumption: blocking chains are at most 9 levels deep. 
If proven incorrect, introduce additional lookups\\r\\nlet head_blockers = materialize(\\r\\nb\\r\\n| lookup (b | project session_id, blocking_session_id2 = blocking_session_id) on $left.blocking_session_id == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id3 = blocking_session_id) on $left.blocking_session_id2 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id4 = blocking_session_id) on $left.blocking_session_id3 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id5 = blocking_session_id) on $left.blocking_session_id4 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id6 = blocking_session_id) on $left.blocking_session_id5 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id7 = blocking_session_id) on $left.blocking_session_id6 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id8 = blocking_session_id) on $left.blocking_session_id7 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id9 = blocking_session_id) on $left.blocking_session_id8 == $right.session_id\\r\\n| project session_id,\\r\\n request_id,\\r\\n head_blocking_session_id = coalesce(\\r\\n blocking_session_id9,\\r\\n blocking_session_id8,\\r\\n blocking_session_id7,\\r\\n blocking_session_id6,\\r\\n blocking_session_id5,\\r\\n blocking_session_id4,\\r\\n blocking_session_id3,\\r\\n blocking_session_id2,\\r\\n blocking_session_id\\r\\n )\\r\\n| where isnotempty(head_blocking_session_id)\\r\\n);\\r\\n// column labels for each grid\\r\\nlet column_labels_string = dynamic(\\r\\n[\\r\\n\\\"session_status\\\",\\\"workload_group_name\\\",\\\"connection_id\\\",\\\"encrypt_option\\\",\\\"wait_resource\\\",\\\"transaction_isolation_level\\\",\\\"query_hash\\\",\\\"query_plan_hash\\\",\\\"session_cpu_time\\\",\\\"session_duration\\\",\\\"request_cpu_time\\\",\\\"sql_module_name\\\"\\r\\n]);\\r\\nlet column_labels_number_count = dynamic(\\r\\n[\\r\\n\\\"session_logical_reads\\\",\\\"session_writes\\\",\\\"session_row_count\\\",\\\"request_row_count\\\",\\\"dop\\\",\\\"parallel_worker_count\\\", \\\"request_writes\\\"\\r\\n]);\\r\\nlet column_labels_number_id = dynamic(\\r\\n[\\r\\n\\\"database_id\\\",\\\"workload_group_id\\\",\\\"request_id\\\",\\\"sql_module_object_id\\\",\\\"blocking_session_id\\\"\\r\\n]);\\r\\nlet column_labels_number_kb = dynamic(\\r\\n[\\r\\n\\\"granted_memory\\\",\\\"requested_memory\\\",\\\"max_used_memory\\\",\\\"ideal_memory\\\"\\r\\n]);\\r\\nlet column_labels_datetime = dynamic(\\r\\n[\\r\\n\\\"login_time\\\",\\\"request_start_time\\\"\\r\\n]);\\r\\nlet column_labels_task_counts = dynamic(\\r\\n[\\r\\n\\\"pending\\\", \\\"runnable\\\", \\\"running\\\", \\\"suspended\\\", \\\"done\\\", \\\"spinloop\\\"\\r\\n]);\\r\\n// result set for strings grid\\r\\nlet session_details_string = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_status, workload_group_name, connection_id, encrypt_option, wait_resource, transaction_isolation_level, query_hash, query_plan_hash, session_cpu_time = format_timespan(session_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), request_cpu_time = format_timespan(request_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), session_duration = format_timespan(session_total_elapsed_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), sql_module_name\\r\\n| project session_id, request_id, columns = pack_all() // put all columns into a property bag\\r\\n| mv-apply column_label = column_labels_string to 
typeof(string) on (project column_label, column_value = tostring(columns[column_label])) // create a row for each name/value pair\\r\\n| project-away columns\\r\\n| sort by column_label asc //make_list later will preserve sort order\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value) // introduce \\\"Property\\\" and \\\"Value\\\" column names\\r\\n| summarize details_string = dynamic_to_json(make_list(c)) by session_id, request_id // for each session_id and request_id, aggregate rowset into a json array of rows, to be used as grid source\\r\\n);\\r\\n// result set for count numbers grid\\r\\nlet session_details_number_count = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_logical_reads, session_writes, session_row_count, request_row_count, dop, parallel_worker_count, request_writes\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_count to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_count = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for id numbers grid\\r\\nlet session_details_number_id = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, database_id, workload_group_id, sql_module_object_id, blocking_session_id\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_id to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_id = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for byte numbers grid\\r\\nlet session_details_number_kb = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, granted_memory = memory_grant_kb, requested_memory = requested_memory_kb, max_used_memory = max_used_memory_kb, ideal_memory = ideal_memory_kb\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_kb to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_kb = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for datetime grid\\r\\nlet session_details_datetime = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, login_time, request_start_time\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_datetime to typeof(string) on (project column_label, column_value = todatetime(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_datetime = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\nlet session_details_task_counts = ( \\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, 
pending = pending_task_count, runnable = runnable_task_count, running = running_task_count, suspended = suspended_task_count, done = done_task_count, spinloop = spinloop_task_count\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_task_counts to typeof(string) on (project column_label, column_value = toint(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Task type\\\", column_label, \\\"Count\\\", column_value)\\r\\n| summarize task_counts = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// return last snapshot with json arrays for each detail grid\\r\\ncurrent_snapshot\\r\\n| lookup head_blockers on $left.session_id == $right.session_id and $left.request_id == $right.request_id\\r\\n| join session_details_string on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_count on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_id on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_kb on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_datetime on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_task_counts on session_id and $left.request_id == $right.request_id\\r\\n| join kind=leftouter (\\r\\n head_blockers\\r\\n | summarize count_blocked_sessions = count() by head_blocking_session_id\\r\\n ) on $left.session_id == $right.head_blocking_session_id\\r\\n| lookup missing_indexes on query_hash\\r\\n| project request_duration = format_timespan(1ms * request_total_elapsed_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n session_id,\\r\\n database_name,\\r\\n request_status,\\r\\n command,\\r\\n input_buffer_text,\\r\\n blocked_by = head_blocking_session_id,\\r\\n count_blocked_sessions,\\r\\n count_missing_indexes,\\r\\n request_normalized_cpu_time = toreal(request_cpu_time_ms) / toreal(request_total_elapsed_time_ms),\\r\\n request_normalized_logical_reads = toreal(request_logical_reads) / toreal(request_total_elapsed_time_ms),\\r\\n dop,\\r\\n wait_type,\\r\\n wait_time = format_timespan(1ms * iif(wait_time_ms != 0, wait_time_ms, int(null)), \\\"d.hh:mm:ss.fff\\\"),\\r\\n open_transaction_count,\\r\\n request_id,\\r\\n percent_complete = iif(percent_complete == 0, real(null), percent_complete),\\r\\n tempdb_allocations_kb,\\r\\n tempdb_current_kb,\\r\\n sql_module_name,\\r\\n login_name,\\r\\n host_name,\\r\\n program_name,\\r\\n client_net_address,\\r\\n request_cpu_time = format_timespan(1ms * request_cpu_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n request_total_elapsed_time_ms,\\r\\n request_logical_reads,\\r\\n statement_text_json = dynamic_to_json(pack(\\\"statement_text\\\", statement_text)),\\r\\n input_buffer_text_json = dynamic_to_json(pack(\\\"input_buffer_text\\\", input_buffer_text)),\\r\\n session_wait_stats = iif(isempty(session_wait_stats), '[{\\\"wait_type\\\":\\\"\\\",\\\"wait_time_ms\\\":\\\"\\\",\\\"signal_wait_time_ms\\\":\\\"\\\",\\\"max_wait_time_ms\\\":\\\"\\\",\\\"waiting_tasks_count\\\":\\\"\\\"}]', session_wait_stats),\\r\\n details_datetime,\\r\\n details_number_id,\\r\\n details_number_kb,\\r\\n details_string,\\r\\n details_number_count,\\r\\n task_counts,\\r\\n create_index_statements,\\r\\n blocked_tooltip_text = iif(isnull(head_blocking_session_id), \\\"\\\", strcat(\\\"Session \\\", tostring(session_id), \\\" is blocked. 
The head blocker of the blocking chain is session \\\", tostring(head_blocking_session_id), \\\".\\\")),\\r\\n blocker_tooltip_text = iif(count_blocked_sessions > 0, strcat(\\\"Session \\\", session_id, \\\" is a head blocker in a blocking chain. It blocks \\\", count_blocked_sessions, \\\" other session(s).\\\"), \\\"\\\")\\r\\n| extend request_normalized_cpu_time = iif(isfinite(request_normalized_cpu_time), request_normalized_cpu_time, real(null)),\\r\\n request_normalized_logical_reads = iif(isfinite(request_normalized_logical_reads), request_normalized_logical_reads, real(null))\\r\\n| where \\\"{blockedFilter}\\\" == \\\"all\\\"\\r\\n or\\r\\n (\\\"{blockedFilter}\\\" == \\\"blocked\\\" and (isnotnull(blocked_by) or isnotnull(count_blocked_sessions)))\\r\\n| sort by count_blocked_sessions desc nulls last, request_total_elapsed_time_ms desc nulls last // blockers first, followed by sessions with longest running requests, then longest running sessions\\r\\n\",\"clusterName\":\"{adxClusterUri}\",\"databaseName\":\"{adxDatabase}\"}", "size": 0, "showAnalytics": true, "title": "Active sessions", @@ -551,7 +551,7 @@ "customColumnWidthSetting": "15ch" }, "tooltipFormat": { - "tooltip": "Session [\"session_id\"] is blocked. The head blocker of the blocking chain is session [\"blocked_by\"]." + "tooltip": "[\"blocked_tooltip_text\"]" } }, { @@ -576,7 +576,7 @@ "customColumnWidthSetting": "12ch" }, "tooltipFormat": { - "tooltip": "Session [\"session_id\"] is a head blocker in a blocking chain. It blocks [\"count_blocked_sessions\"] other session(s)." + "tooltip": "[\"blocker_tooltip_text\"]" } }, { @@ -797,6 +797,14 @@ { "columnMatch": "create_index_statements", "formatter": 5 + }, + { + "columnMatch": "blocked_tooltip_text", + "formatter": 5 + }, + { + "columnMatch": "blocker_tooltip_text", + "formatter": 5 } ], "filter": true, diff --git a/Workbooks/Database watcher/SQL Server/instance/activity/activity.workbook b/Workbooks/Database watcher/SQL Server/instance/activity/activity.workbook index 1a945a2179..6f7c47aa95 100644 --- a/Workbooks/Database watcher/SQL Server/instance/activity/activity.workbook +++ b/Workbooks/Database watcher/SQL Server/instance/activity/activity.workbook @@ -313,7 +313,7 @@ "type": 3, "content": { "version": "KqlItem/1.0", - "query": "{\"version\":\"AzureDataExplorerQuery/1.0\",\"queryText\":\"let max_missing_indexes_per_query_hash = 30;\\r\\n// The missing indexes table may not exist. Work around by using a same-schema stub in a fuzzy union.\\r\\nlet missing_indexes_stub = view() {\\r\\nprint query_hash = \\\"\\\", missing_index_label = \\\"\\\", create_index_statements = \\\"\\\"\\r\\n| where isnotempty(query_hash)\\r\\n};\\r\\nlet missing_indexes_table = view() {\\r\\nsqlserver_missing_indexes\\r\\n| where sample_time_utc between ({timeRange:start} .. 
{timeRange:end})\\r\\n| where sample_time_utc <= todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where server_name =~ @\\\"{serverName}\\\"\\r\\n| where machine_name =~ @\\\"{machineName}\\\"\\r\\n| where grain_type == \\\"query_plan\\\"\\r\\n// Use the last missing index sample within the relevant interval\\r\\n| summarize arg_max(sample_time_utc, schema_name, object_name, database_engine_start_time_utc, avg_user_impact, equality_columns, inequality_columns, included_columns, user_seeks, user_scans, last_user_seek_utc, last_user_scan_utc)\\r\\n by machine_name, server_name, database_name, query_hash, query_plan_hash\\r\\n// Remove dups that may be present in different plans\\r\\n| summarize sample_time_utc = min(sample_time_utc),\\r\\n database_engine_start_time_utc = min(database_engine_start_time_utc),\\r\\n avg_user_impact = max(avg_user_impact),\\r\\n user_seeks = sum(user_seeks),\\r\\n user_scans = sum(user_scans),\\r\\n last_user_seek_utc = max(last_user_seek_utc),\\r\\n last_user_scan_utc = max(last_user_scan_utc),\\r\\n query_plan_hash = take_any(query_plan_hash),\\r\\n count_plans = dcount(query_plan_hash)\\r\\n by machine_name, server_name, database_name, query_hash, schema_name, object_name, equality_columns, inequality_columns, included_columns\\r\\n| extend duration_since_startup_seconds = 1s * datetime_diff(\\\"second\\\", sample_time_utc, database_engine_start_time_utc),\\r\\n query_plan_hash = iif(count_plans == 1, tostring(query_plan_hash), strcat(query_plan_hash, \\\" (+\\\", tostring(count_plans - 1), \\\")\\\"))\\r\\n| where not(schema_name has_any(\\\"[\\\",\\\"]\\\")) and not(object_name has_any(\\\"[\\\",\\\"]\\\"))\\r\\n| extend create_index_statement = strcat(\\r\\n\\\"/* \\\",\\r\\n\\\"\\\\r\\\\nMachine: \\\", machine_name,\\r\\n\\\"\\\\r\\\\nServer: \\\", server_name,\\r\\n\\\"\\\\r\\\\nDatabase: \\\", database_name,\\r\\n\\\"\\\\r\\\\nQuery hash: \\\", query_hash,\\r\\n\\\"\\\\r\\\\nQuery plan hash: \\\", query_plan_hash,\\r\\n\\\"\\\\r\\\\nAverage impact (%): \\\", avg_user_impact,\\r\\n\\\"\\\\r\\\\nSeeks: \\\", user_seeks,\\r\\n\\\"\\\\r\\\\nScans: \\\", user_scans,\\r\\n\\\"\\\\r\\\\nLast seek time (UTC): \\\", iif(isnotempty(last_user_seek_utc), format_datetime(last_user_seek_utc, \\\"yyyy-MM-dd HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nLast scan time (UTC): \\\", iif(isnotempty(last_user_scan_utc), format_datetime(last_user_scan_utc, \\\"yyyy-MM-dd HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nInterval duration: \\\", strcat(format_timespan(duration_since_startup_seconds, \\\"d\\\"), \\\" days, \\\", format_timespan(duration_since_startup_seconds, \\\"hh\\\"), \\\" hours, \\\", format_timespan(duration_since_startup_seconds, \\\"m\\\"), \\\" minutes\\\"),\\r\\n\\\"\\\\r\\\\n*/\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX [replace-with-index-name] ON [\\\", schema_name, \\\"].[\\\", object_name, \\\"]\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\niif(isnotempty(equality_columns), strcat(\\\"\\\\r\\\\n\\\", equality_columns, iif(isnotempty(inequality_columns), \\\",\\\", \\\"\\\"), \\\" /* equality columns */\\\"), \\\"\\\"),\\r\\niif(isnotempty(inequality_columns), strcat(\\\"\\\\r\\\\n\\\", inequality_columns, \\\" /* inequality columns */\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\n)\\\",\\r\\niif(isnotempty(included_columns), strcat(\\\"\\\\r\\\\nINCLUDE (\\\", included_columns, \\\")\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\nWITH /* Adjust index options as needed */\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\n\\\"\\\\r\\\\nMAXDOP = 8,\\\",\\r\\n\\\"\\\\r\\\\nONLINE = ON 
(WAIT_AT_LOW_PRIORITY (MAX_DURATION = 5 MINUTES, ABORT_AFTER_WAIT = SELF)),\\\",\\r\\n\\\"\\\\r\\\\n-- RESUMABLE = ON,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = ROW,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = PAGE,\\\",\\r\\n\\\"\\\\r\\\\nSORT_IN_TEMPDB = ON\\\",\\r\\n\\\"\\\\r\\\\n);\\\",\\r\\n\\\"\\\\r\\\\n\\\"\\r\\n)\\r\\n| summarize count_missing_indexes = min_of(count(), max_missing_indexes_per_query_hash),\\r\\n statements = make_list(create_index_statement, max_missing_indexes_per_query_hash) by query_hash\\r\\n| project query_hash,\\r\\n count_missing_indexes,\\r\\n create_index_statements = strcat(\\r\\n\\\"/*\\\",\\r\\n\\\"\\\\r\\\\nIMPORTANT:\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX statements in this script are tentative suggestions.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nBefore creating indexes based on these suggestions:\\\",\\r\\n\\\"\\\\r\\\\n1. Examine existing indexes. If possible, modify them instead of creating a new index.\\\",\\r\\n\\\"\\\\r\\\\n2. When there are multiple similar index suggestions, consider combining them into one index.\\\",\\r\\n\\\"\\\\r\\\\n3. Always test and validate that a new or modified index does improve performance.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nAvoid creating many little-used indexes. That slows down inserts, updates,\\\",\\r\\n\\\"\\\\r\\\\nand deletes substantially, and increases storage and memory consumption.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nFor more information, see https://go.microsoft.com/fwlink/?linkid=2245704.\\\",\\r\\n\\\"\\\\r\\\\n*/\\\\r\\\\n\\\\r\\\\n\\\",\\r\\nstrcat_array(statements, \\\"\\\\r\\\\n\\\")\\r\\n)\\r\\n};\\r\\nlet missing_indexes = (\\r\\nunion isfuzzy=true\\r\\n(missing_indexes_stub),\\r\\n(missing_indexes_table)\\r\\n);\\r\\n// The sample to be displayed\\r\\nlet current_snapshot = materialize(\\r\\nsqlserver_active_sessions\\r\\n| where sample_time_utc == todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where machine_name =~ @\\\"{machineName}\\\"\\r\\n| where server_name =~ @\\\"{serverName}\\\"\\r\\n| project-away server_name, machine_name\\r\\n| extend blocking_session_id = iif(\\r\\n blocking_session_id in (0, -4, -5), // Do not consider long latch waits when displaying blocking chains\\r\\n int(null),\\r\\n blocking_session_id\\r\\n )\\r\\n);\\r\\n// Blocking-related columns\\r\\nlet b = materialize(\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, blocking_session_id\\r\\n);\\r\\n// Assumption: blocking chains are at most 9 levels deep. 
If proven incorrect, introduce additional lookups\\r\\nlet head_blockers = materialize(\\r\\nb\\r\\n| lookup (b | project session_id, blocking_session_id2 = blocking_session_id) on $left.blocking_session_id == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id3 = blocking_session_id) on $left.blocking_session_id2 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id4 = blocking_session_id) on $left.blocking_session_id3 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id5 = blocking_session_id) on $left.blocking_session_id4 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id6 = blocking_session_id) on $left.blocking_session_id5 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id7 = blocking_session_id) on $left.blocking_session_id6 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id8 = blocking_session_id) on $left.blocking_session_id7 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id9 = blocking_session_id) on $left.blocking_session_id8 == $right.session_id\\r\\n| project session_id,\\r\\n request_id,\\r\\n head_blocking_session_id = coalesce(\\r\\n blocking_session_id9,\\r\\n blocking_session_id8,\\r\\n blocking_session_id7,\\r\\n blocking_session_id6,\\r\\n blocking_session_id5,\\r\\n blocking_session_id4,\\r\\n blocking_session_id3,\\r\\n blocking_session_id2,\\r\\n blocking_session_id\\r\\n )\\r\\n| where isnotempty(head_blocking_session_id)\\r\\n);\\r\\n// column labels for each grid\\r\\nlet column_labels_string = dynamic(\\r\\n[\\r\\n\\\"session_status\\\",\\\"workload_group_name\\\",\\\"connection_id\\\",\\\"encrypt_option\\\",\\\"wait_resource\\\",\\\"transaction_isolation_level\\\",\\\"query_hash\\\",\\\"query_plan_hash\\\",\\\"session_cpu_time\\\",\\\"session_duration\\\",\\\"request_cpu_time\\\",\\\"sql_module_name\\\"\\r\\n]);\\r\\nlet column_labels_number_count = dynamic(\\r\\n[\\r\\n\\\"session_logical_reads\\\",\\\"session_writes\\\",\\\"session_row_count\\\",\\\"request_row_count\\\",\\\"dop\\\",\\\"parallel_worker_count\\\", \\\"request_writes\\\"\\r\\n]);\\r\\nlet column_labels_number_id = dynamic(\\r\\n[\\r\\n\\\"database_id\\\",\\\"workload_group_id\\\",\\\"request_id\\\",\\\"sql_module_object_id\\\",\\\"blocking_session_id\\\"\\r\\n]);\\r\\nlet column_labels_number_kb = dynamic(\\r\\n[\\r\\n\\\"granted_memory\\\",\\\"requested_memory\\\",\\\"max_used_memory\\\",\\\"ideal_memory\\\"\\r\\n]);\\r\\nlet column_labels_datetime = dynamic(\\r\\n[\\r\\n\\\"login_time\\\",\\\"request_start_time\\\"\\r\\n]);\\r\\nlet column_labels_task_counts = dynamic(\\r\\n[\\r\\n\\\"pending\\\", \\\"runnable\\\", \\\"running\\\", \\\"suspended\\\", \\\"done\\\", \\\"spinloop\\\"\\r\\n]);\\r\\n// result set for strings grid\\r\\nlet session_details_string = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_status, workload_group_name, connection_id, encrypt_option, wait_resource, transaction_isolation_level, query_hash, query_plan_hash, session_cpu_time = format_timespan(session_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), request_cpu_time = format_timespan(request_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), session_duration = format_timespan(session_total_elapsed_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), sql_module_name\\r\\n| project session_id, request_id, columns = pack_all() // put all columns into a property bag\\r\\n| mv-apply column_label = column_labels_string to 
typeof(string) on (project column_label, column_value = tostring(columns[column_label])) // create a row for each name/value pair\\r\\n| project-away columns\\r\\n| sort by column_label asc //make_list later will preserve sort order\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value) // introduce \\\"Property\\\" and \\\"Value\\\" column names\\r\\n| summarize details_string = dynamic_to_json(make_list(c)) by session_id, request_id // for each session_id and request_id, aggregate rowset into a json array of rows, to be used as grid source\\r\\n);\\r\\n// result set for count numbers grid\\r\\nlet session_details_number_count = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_logical_reads, session_writes, session_row_count, request_row_count, dop, parallel_worker_count, request_writes\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_count to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_count = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for id numbers grid\\r\\nlet session_details_number_id = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, database_id, workload_group_id, sql_module_object_id, blocking_session_id\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_id to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_id = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for byte numbers grid\\r\\nlet session_details_number_kb = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, granted_memory = memory_grant_kb, requested_memory = requested_memory_kb, max_used_memory = max_used_memory_kb, ideal_memory = ideal_memory_kb\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_kb to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_kb = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for datetime grid\\r\\nlet session_details_datetime = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, login_time, request_start_time\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_datetime to typeof(string) on (project column_label, column_value = todatetime(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_datetime = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\nlet session_details_task_counts = ( \\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, 
pending = pending_task_count, runnable = runnable_task_count, running = running_task_count, suspended = suspended_task_count, done = done_task_count, spinloop = spinloop_task_count\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_task_counts to typeof(string) on (project column_label, column_value = toint(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Task type\\\", column_label, \\\"Count\\\", column_value)\\r\\n| summarize task_counts = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// return last snapshot with json arrays for each detail grid\\r\\ncurrent_snapshot\\r\\n| lookup head_blockers on $left.session_id == $right.session_id and $left.request_id == $right.request_id\\r\\n| join session_details_string on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_count on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_id on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_kb on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_datetime on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_task_counts on session_id and $left.request_id == $right.request_id\\r\\n| join kind=leftouter (\\r\\n head_blockers\\r\\n | summarize count_blocked_sessions = count() by head_blocking_session_id\\r\\n ) on $left.session_id == $right.head_blocking_session_id\\r\\n| lookup missing_indexes on query_hash\\r\\n| project request_duration = format_timespan(1ms * request_total_elapsed_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n session_id,\\r\\n database_name,\\r\\n request_status,\\r\\n command,\\r\\n input_buffer_text,\\r\\n blocked_by = head_blocking_session_id,\\r\\n count_blocked_sessions,\\r\\n count_missing_indexes,\\r\\n request_normalized_cpu_time = toreal(request_cpu_time_ms) / toreal(request_total_elapsed_time_ms),\\r\\n request_normalized_logical_reads = toreal(request_logical_reads) / toreal(request_total_elapsed_time_ms),\\r\\n dop,\\r\\n wait_type,\\r\\n wait_time = format_timespan(1ms * iif(wait_time_ms != 0, wait_time_ms, int(null)), \\\"d.hh:mm:ss.fff\\\"),\\r\\n open_transaction_count,\\r\\n request_id,\\r\\n percent_complete = iif(percent_complete == 0, real(null), percent_complete),\\r\\n tempdb_allocations_kb,\\r\\n tempdb_current_kb,\\r\\n sql_module_name,\\r\\n login_name,\\r\\n host_name,\\r\\n program_name,\\r\\n client_net_address,\\r\\n request_cpu_time = format_timespan(1ms * request_cpu_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n request_total_elapsed_time_ms,\\r\\n request_logical_reads,\\r\\n statement_text_json = dynamic_to_json(pack(\\\"statement_text\\\", statement_text)),\\r\\n input_buffer_text_json = dynamic_to_json(pack(\\\"input_buffer_text\\\", input_buffer_text)),\\r\\n session_wait_stats = iif(isempty(session_wait_stats), '[{\\\"wait_type\\\":\\\"\\\",\\\"wait_time_ms\\\":\\\"\\\",\\\"signal_wait_time_ms\\\":\\\"\\\",\\\"max_wait_time_ms\\\":\\\"\\\",\\\"waiting_tasks_count\\\":\\\"\\\"}]', session_wait_stats),\\r\\n details_datetime,\\r\\n details_number_id,\\r\\n details_number_kb,\\r\\n details_string,\\r\\n details_number_count,\\r\\n task_counts,\\r\\n create_index_statements\\r\\n| extend request_normalized_cpu_time = iif(isfinite(request_normalized_cpu_time), request_normalized_cpu_time, real(null)),\\r\\n 
request_normalized_logical_reads = iif(isfinite(request_normalized_logical_reads), request_normalized_logical_reads, real(null))\\r\\n| where \\\"{blockedFilter}\\\" == \\\"all\\\"\\r\\n or\\r\\n (\\\"{blockedFilter}\\\" == \\\"blocked\\\" and (isnotnull(blocked_by) or isnotnull(count_blocked_sessions)))\\r\\n| sort by count_blocked_sessions desc nulls last, request_total_elapsed_time_ms desc nulls last // blockers first, followed by sessions with longest running requests, then longest running sessions\\r\\n\",\"clusterName\":\"{adxClusterUri}\",\"databaseName\":\"{adxDatabase}\"}", + "query": "{\"version\":\"AzureDataExplorerQuery/1.0\",\"queryText\":\"let max_missing_indexes_per_query_hash = 30;\\r\\n// The missing indexes table may not exist. Work around by using a same-schema stub in a fuzzy union.\\r\\nlet missing_indexes_stub = view() {\\r\\nprint query_hash = \\\"\\\", missing_index_label = \\\"\\\", create_index_statements = \\\"\\\"\\r\\n| where isnotempty(query_hash)\\r\\n};\\r\\nlet missing_indexes_table = view() {\\r\\nsqlserver_missing_indexes\\r\\n| where sample_time_utc between ({timeRange:start} .. {timeRange:end})\\r\\n| where sample_time_utc <= todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where server_name =~ @\\\"{serverName}\\\"\\r\\n| where machine_name =~ @\\\"{machineName}\\\"\\r\\n| where grain_type == \\\"query_plan\\\"\\r\\n// Use the last missing index sample within the relevant interval\\r\\n| summarize arg_max(sample_time_utc, schema_name, object_name, database_engine_start_time_utc, avg_user_impact, equality_columns, inequality_columns, included_columns, user_seeks, user_scans, last_user_seek_utc, last_user_scan_utc)\\r\\n by machine_name, server_name, database_name, query_hash, query_plan_hash\\r\\n// Remove dups that may be present in different plans\\r\\n| summarize sample_time_utc = min(sample_time_utc),\\r\\n database_engine_start_time_utc = min(database_engine_start_time_utc),\\r\\n avg_user_impact = max(avg_user_impact),\\r\\n user_seeks = sum(user_seeks),\\r\\n user_scans = sum(user_scans),\\r\\n last_user_seek_utc = max(last_user_seek_utc),\\r\\n last_user_scan_utc = max(last_user_scan_utc),\\r\\n query_plan_hash = take_any(query_plan_hash),\\r\\n count_plans = dcount(query_plan_hash)\\r\\n by machine_name, server_name, database_name, query_hash, schema_name, object_name, equality_columns, inequality_columns, included_columns\\r\\n| extend duration_since_startup_seconds = 1s * datetime_diff(\\\"second\\\", sample_time_utc, database_engine_start_time_utc),\\r\\n query_plan_hash = iif(count_plans == 1, tostring(query_plan_hash), strcat(query_plan_hash, \\\" (+\\\", tostring(count_plans - 1), \\\")\\\"))\\r\\n| where not(schema_name has_any(\\\"[\\\",\\\"]\\\")) and not(object_name has_any(\\\"[\\\",\\\"]\\\"))\\r\\n| extend create_index_statement = strcat(\\r\\n\\\"/* \\\",\\r\\n\\\"\\\\r\\\\nMachine: \\\", machine_name,\\r\\n\\\"\\\\r\\\\nServer: \\\", server_name,\\r\\n\\\"\\\\r\\\\nDatabase: \\\", database_name,\\r\\n\\\"\\\\r\\\\nQuery hash: \\\", query_hash,\\r\\n\\\"\\\\r\\\\nQuery plan hash: \\\", query_plan_hash,\\r\\n\\\"\\\\r\\\\nAverage impact (%): \\\", avg_user_impact,\\r\\n\\\"\\\\r\\\\nSeeks: \\\", user_seeks,\\r\\n\\\"\\\\r\\\\nScans: \\\", user_scans,\\r\\n\\\"\\\\r\\\\nLast seek time (UTC): \\\", iif(isnotempty(last_user_seek_utc), format_datetime(last_user_seek_utc, \\\"yyyy-MM-dd HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nLast scan time (UTC): \\\", iif(isnotempty(last_user_scan_utc), format_datetime(last_user_scan_utc, 
\\\"yyyy-MM-dd HH:mm:ss.fff\\\"), \\\"N/A\\\"),\\r\\n\\\"\\\\r\\\\nInterval duration: \\\", strcat(format_timespan(duration_since_startup_seconds, \\\"d\\\"), \\\" days, \\\", format_timespan(duration_since_startup_seconds, \\\"hh\\\"), \\\" hours, \\\", format_timespan(duration_since_startup_seconds, \\\"m\\\"), \\\" minutes\\\"),\\r\\n\\\"\\\\r\\\\n*/\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX [replace-with-index-name] ON [\\\", schema_name, \\\"].[\\\", object_name, \\\"]\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\niif(isnotempty(equality_columns), strcat(\\\"\\\\r\\\\n\\\", equality_columns, iif(isnotempty(inequality_columns), \\\",\\\", \\\"\\\"), \\\" /* equality columns */\\\"), \\\"\\\"),\\r\\niif(isnotempty(inequality_columns), strcat(\\\"\\\\r\\\\n\\\", inequality_columns, \\\" /* inequality columns */\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\n)\\\",\\r\\niif(isnotempty(included_columns), strcat(\\\"\\\\r\\\\nINCLUDE (\\\", included_columns, \\\")\\\"), \\\"\\\"),\\r\\n\\\"\\\\r\\\\nWITH /* Adjust index options as needed */\\\",\\r\\n\\\"\\\\r\\\\n(\\\",\\r\\n\\\"\\\\r\\\\nMAXDOP = 8,\\\",\\r\\n\\\"\\\\r\\\\nONLINE = ON (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 5 MINUTES, ABORT_AFTER_WAIT = SELF)),\\\",\\r\\n\\\"\\\\r\\\\n-- RESUMABLE = ON,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = ROW,\\\",\\r\\n\\\"\\\\r\\\\n-- DATA_COMPRESSION = PAGE,\\\",\\r\\n\\\"\\\\r\\\\nSORT_IN_TEMPDB = ON\\\",\\r\\n\\\"\\\\r\\\\n);\\\",\\r\\n\\\"\\\\r\\\\n\\\"\\r\\n)\\r\\n| summarize count_missing_indexes = min_of(count(), max_missing_indexes_per_query_hash),\\r\\n statements = make_list(create_index_statement, max_missing_indexes_per_query_hash) by query_hash\\r\\n| project query_hash,\\r\\n count_missing_indexes,\\r\\n create_index_statements = strcat(\\r\\n\\\"/*\\\",\\r\\n\\\"\\\\r\\\\nIMPORTANT:\\\",\\r\\n\\\"\\\\r\\\\nCREATE INDEX statements in this script are tentative suggestions.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nBefore creating indexes based on these suggestions:\\\",\\r\\n\\\"\\\\r\\\\n1. Examine existing indexes. If possible, modify them instead of creating a new index.\\\",\\r\\n\\\"\\\\r\\\\n2. When there are multiple similar index suggestions, consider combining them into one index.\\\",\\r\\n\\\"\\\\r\\\\n3. Always test and validate that a new or modified index does improve performance.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nAvoid creating many little-used indexes. 
That slows down inserts, updates,\\\",\\r\\n\\\"\\\\r\\\\nand deletes substantially, and increases storage and memory consumption.\\\",\\r\\n\\\"\\\\r\\\\n\\\\r\\\\nFor more information, see https://go.microsoft.com/fwlink/?linkid=2245704.\\\",\\r\\n\\\"\\\\r\\\\n*/\\\\r\\\\n\\\\r\\\\n\\\",\\r\\nstrcat_array(statements, \\\"\\\\r\\\\n\\\")\\r\\n)\\r\\n};\\r\\nlet missing_indexes = (\\r\\nunion isfuzzy=true\\r\\n(missing_indexes_stub),\\r\\n(missing_indexes_table)\\r\\n);\\r\\n// The sample to be displayed\\r\\nlet current_snapshot = materialize(\\r\\nsqlserver_active_sessions\\r\\n| where sample_time_utc == todatetime(\\\"{activitySampleTime}\\\")\\r\\n| where machine_name =~ @\\\"{machineName}\\\"\\r\\n| where server_name =~ @\\\"{serverName}\\\"\\r\\n| project-away server_name, machine_name\\r\\n| extend blocking_session_id = iif(\\r\\n blocking_session_id in (0, -4, -5), // Do not consider long latch waits when displaying blocking chains\\r\\n int(null),\\r\\n blocking_session_id\\r\\n )\\r\\n);\\r\\n// Blocking-related columns\\r\\nlet b = materialize(\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, blocking_session_id\\r\\n);\\r\\n// Assumption: blocking chains are at most 9 levels deep. If proven incorrect, introduce additional lookups\\r\\nlet head_blockers = materialize(\\r\\nb\\r\\n| lookup (b | project session_id, blocking_session_id2 = blocking_session_id) on $left.blocking_session_id == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id3 = blocking_session_id) on $left.blocking_session_id2 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id4 = blocking_session_id) on $left.blocking_session_id3 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id5 = blocking_session_id) on $left.blocking_session_id4 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id6 = blocking_session_id) on $left.blocking_session_id5 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id7 = blocking_session_id) on $left.blocking_session_id6 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id8 = blocking_session_id) on $left.blocking_session_id7 == $right.session_id\\r\\n| lookup (b | project session_id, blocking_session_id9 = blocking_session_id) on $left.blocking_session_id8 == $right.session_id\\r\\n| project session_id,\\r\\n request_id,\\r\\n head_blocking_session_id = coalesce(\\r\\n blocking_session_id9,\\r\\n blocking_session_id8,\\r\\n blocking_session_id7,\\r\\n blocking_session_id6,\\r\\n blocking_session_id5,\\r\\n blocking_session_id4,\\r\\n blocking_session_id3,\\r\\n blocking_session_id2,\\r\\n blocking_session_id\\r\\n )\\r\\n| where isnotempty(head_blocking_session_id)\\r\\n);\\r\\n// column labels for each grid\\r\\nlet column_labels_string = dynamic(\\r\\n[\\r\\n\\\"session_status\\\",\\\"workload_group_name\\\",\\\"connection_id\\\",\\\"encrypt_option\\\",\\\"wait_resource\\\",\\\"transaction_isolation_level\\\",\\\"query_hash\\\",\\\"query_plan_hash\\\",\\\"session_cpu_time\\\",\\\"session_duration\\\",\\\"request_cpu_time\\\",\\\"sql_module_name\\\"\\r\\n]);\\r\\nlet column_labels_number_count = dynamic(\\r\\n[\\r\\n\\\"session_logical_reads\\\",\\\"session_writes\\\",\\\"session_row_count\\\",\\\"request_row_count\\\",\\\"dop\\\",\\\"parallel_worker_count\\\", \\\"request_writes\\\"\\r\\n]);\\r\\nlet column_labels_number_id = 
dynamic(\\r\\n[\\r\\n\\\"database_id\\\",\\\"workload_group_id\\\",\\\"request_id\\\",\\\"sql_module_object_id\\\",\\\"blocking_session_id\\\"\\r\\n]);\\r\\nlet column_labels_number_kb = dynamic(\\r\\n[\\r\\n\\\"granted_memory\\\",\\\"requested_memory\\\",\\\"max_used_memory\\\",\\\"ideal_memory\\\"\\r\\n]);\\r\\nlet column_labels_datetime = dynamic(\\r\\n[\\r\\n\\\"login_time\\\",\\\"request_start_time\\\"\\r\\n]);\\r\\nlet column_labels_task_counts = dynamic(\\r\\n[\\r\\n\\\"pending\\\", \\\"runnable\\\", \\\"running\\\", \\\"suspended\\\", \\\"done\\\", \\\"spinloop\\\"\\r\\n]);\\r\\n// result set for strings grid\\r\\nlet session_details_string = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_status, workload_group_name, connection_id, encrypt_option, wait_resource, transaction_isolation_level, query_hash, query_plan_hash, session_cpu_time = format_timespan(session_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), request_cpu_time = format_timespan(request_cpu_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), session_duration = format_timespan(session_total_elapsed_time_ms * 1ms, \\\"d.hh:mm:ss.fff\\\"), sql_module_name\\r\\n| project session_id, request_id, columns = pack_all() // put all columns into a property bag\\r\\n| mv-apply column_label = column_labels_string to typeof(string) on (project column_label, column_value = tostring(columns[column_label])) // create a row for each name/value pair\\r\\n| project-away columns\\r\\n| sort by column_label asc //make_list later will preserve sort order\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value) // introduce \\\"Property\\\" and \\\"Value\\\" column names\\r\\n| summarize details_string = dynamic_to_json(make_list(c)) by session_id, request_id // for each session_id and request_id, aggregate rowset into a json array of rows, to be used as grid source\\r\\n);\\r\\n// result set for count numbers grid\\r\\nlet session_details_number_count = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, session_logical_reads, session_writes, session_row_count, request_row_count, dop, parallel_worker_count, request_writes\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_count to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_count = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for id numbers grid\\r\\nlet session_details_number_id = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, database_id, workload_group_id, sql_module_object_id, blocking_session_id\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_id to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_id = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for byte numbers grid\\r\\nlet session_details_number_kb = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, granted_memory = memory_grant_kb, requested_memory = 
requested_memory_kb, max_used_memory = max_used_memory_kb, ideal_memory = ideal_memory_kb\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_number_kb to typeof(string) on (project column_label, column_value = tolong(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_number_kb = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// result set for datetime grid\\r\\nlet session_details_datetime = (\\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, login_time, request_start_time\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_datetime to typeof(string) on (project column_label, column_value = todatetime(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Property\\\", column_label, \\\"Value\\\", column_value)\\r\\n| summarize details_datetime = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\nlet session_details_task_counts = ( \\r\\ncurrent_snapshot\\r\\n| project session_id, request_id, pending = pending_task_count, runnable = runnable_task_count, running = running_task_count, suspended = suspended_task_count, done = done_task_count, spinloop = spinloop_task_count\\r\\n| project session_id, request_id, columns = pack_all()\\r\\n| mv-apply column_label = column_labels_task_counts to typeof(string) on (project column_label, column_value = toint(columns[column_label]))\\r\\n| project-away columns\\r\\n| sort by column_label asc\\r\\n| project session_id, request_id, c = pack(\\\"Task type\\\", column_label, \\\"Count\\\", column_value)\\r\\n| summarize task_counts = dynamic_to_json(make_list(c)) by session_id, request_id\\r\\n);\\r\\n// return last snapshot with json arrays for each detail grid\\r\\ncurrent_snapshot\\r\\n| lookup head_blockers on $left.session_id == $right.session_id and $left.request_id == $right.request_id\\r\\n| join session_details_string on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_count on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_id on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_number_kb on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_datetime on session_id and $left.request_id == $right.request_id\\r\\n| join session_details_task_counts on session_id and $left.request_id == $right.request_id\\r\\n| join kind=leftouter (\\r\\n head_blockers\\r\\n | summarize count_blocked_sessions = count() by head_blocking_session_id\\r\\n ) on $left.session_id == $right.head_blocking_session_id\\r\\n| lookup missing_indexes on query_hash\\r\\n| project request_duration = format_timespan(1ms * request_total_elapsed_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n session_id,\\r\\n database_name,\\r\\n request_status,\\r\\n command,\\r\\n input_buffer_text,\\r\\n blocked_by = head_blocking_session_id,\\r\\n count_blocked_sessions,\\r\\n count_missing_indexes,\\r\\n request_normalized_cpu_time = toreal(request_cpu_time_ms) / toreal(request_total_elapsed_time_ms),\\r\\n request_normalized_logical_reads = toreal(request_logical_reads) / toreal(request_total_elapsed_time_ms),\\r\\n dop,\\r\\n wait_type,\\r\\n wait_time = 
format_timespan(1ms * iif(wait_time_ms != 0, wait_time_ms, int(null)), \\\"d.hh:mm:ss.fff\\\"),\\r\\n open_transaction_count,\\r\\n request_id,\\r\\n percent_complete = iif(percent_complete == 0, real(null), percent_complete),\\r\\n tempdb_allocations_kb,\\r\\n tempdb_current_kb,\\r\\n sql_module_name,\\r\\n login_name,\\r\\n host_name,\\r\\n program_name,\\r\\n client_net_address,\\r\\n request_cpu_time = format_timespan(1ms * request_cpu_time_ms, \\\"d.hh:mm:ss.fff\\\"),\\r\\n request_total_elapsed_time_ms,\\r\\n request_logical_reads,\\r\\n statement_text_json = dynamic_to_json(pack(\\\"statement_text\\\", statement_text)),\\r\\n input_buffer_text_json = dynamic_to_json(pack(\\\"input_buffer_text\\\", input_buffer_text)),\\r\\n session_wait_stats = iif(isempty(session_wait_stats), '[{\\\"wait_type\\\":\\\"\\\",\\\"wait_time_ms\\\":\\\"\\\",\\\"signal_wait_time_ms\\\":\\\"\\\",\\\"max_wait_time_ms\\\":\\\"\\\",\\\"waiting_tasks_count\\\":\\\"\\\"}]', session_wait_stats),\\r\\n details_datetime,\\r\\n details_number_id,\\r\\n details_number_kb,\\r\\n details_string,\\r\\n details_number_count,\\r\\n task_counts,\\r\\n create_index_statements,\\r\\n blocked_tooltip_text = iif(isnull(head_blocking_session_id), \\\"\\\", strcat(\\\"Session \\\", tostring(session_id), \\\" is blocked. The head blocker of the blocking chain is session \\\", tostring(head_blocking_session_id), \\\".\\\")),\\r\\n blocker_tooltip_text = iif(count_blocked_sessions > 0, strcat(\\\"Session \\\", session_id, \\\" is a head blocker in a blocking chain. It blocks \\\", count_blocked_sessions, \\\" other session(s).\\\"), \\\"\\\")\\r\\n| extend request_normalized_cpu_time = iif(isfinite(request_normalized_cpu_time), request_normalized_cpu_time, real(null)),\\r\\n request_normalized_logical_reads = iif(isfinite(request_normalized_logical_reads), request_normalized_logical_reads, real(null))\\r\\n| where \\\"{blockedFilter}\\\" == \\\"all\\\"\\r\\n or\\r\\n (\\\"{blockedFilter}\\\" == \\\"blocked\\\" and (isnotnull(blocked_by) or isnotnull(count_blocked_sessions)))\\r\\n| sort by count_blocked_sessions desc nulls last, request_total_elapsed_time_ms desc nulls last // blockers first, followed by sessions with longest running requests, then longest running sessions\\r\\n\",\"clusterName\":\"{adxClusterUri}\",\"databaseName\":\"{adxDatabase}\"}", "size": 0, "showAnalytics": true, "title": "Active sessions", @@ -551,7 +551,7 @@ "customColumnWidthSetting": "15ch" }, "tooltipFormat": { - "tooltip": "Session [\"session_id\"] is blocked. The head blocker of the blocking chain is session [\"blocked_by\"]." + "tooltip": "[\"blocked_tooltip_text\"]" } }, { @@ -576,7 +576,7 @@ "customColumnWidthSetting": "12ch" }, "tooltipFormat": { - "tooltip": "Session [\"session_id\"] is a head blocker in a blocking chain. It blocks [\"count_blocked_sessions\"] other session(s)." + "tooltip": "[\"blocker_tooltip_text\"]" } }, { @@ -797,6 +797,14 @@ { "columnMatch": "create_index_statements", "formatter": 5 + }, + { + "columnMatch": "blocked_tooltip_text", + "formatter": 5 + }, + { + "columnMatch": "blocker_tooltip_text", + "formatter": 5 } ], "filter": true,
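Note on the head-blocker resolution used in the query above: it walks each blocking chain with a fixed number of lookup operators and then takes the first non-empty ancestor with coalesce, under the stated assumption that chains are at most 9 levels deep. A minimal standalone sketch of that pattern, shortened to a 3-level chain over a hypothetical in-memory table (not part of the workbook):

// Hypothetical data: blocking_session_id = 0 means "not blocked", mirroring the query's normalization to null.
let b = datatable(session_id:int, blocking_session_id:int)
[
    51, 0,   // head blocker
    52, 51,  // blocked by 51
    53, 52   // blocked by 52; its head blocker is 51
]
| extend blocking_session_id = iif(blocking_session_id == 0, int(null), blocking_session_id);
b
| lookup (b | project session_id, blocking_session_id2 = blocking_session_id) on $left.blocking_session_id == $right.session_id
| lookup (b | project session_id, blocking_session_id3 = blocking_session_id) on $left.blocking_session_id2 == $right.session_id
| project session_id,
          head_blocking_session_id = coalesce(blocking_session_id3, blocking_session_id2, blocking_session_id)
| where isnotempty(head_blocking_session_id)

Sessions 52 and 53 both resolve to head blocker 51; session 51 itself is filtered out because it has no blocker. Extending the chain depth is a matter of adding one more lookup per level, as the query's own comment notes.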
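Each session_details_* subquery in the query turns a wide snapshot row into a small Property/Value rowset for a detail grid: pack_all() folds the row into a property bag, mv-apply over a fixed label list emits one row per property, pack() names the two output columns, and make_list()/dynamic_to_json() serialize the rows into a JSON array keyed by session_id and request_id. A reduced sketch of the same pipeline over a hypothetical two-metric snapshot (illustrative table and column names only):

let current_snapshot = datatable(session_id:int, request_id:int, session_logical_reads:long, session_writes:long)
[
    51, 0, 1200, 40,
    53, 0, 98000, 7
];
let column_labels = dynamic(["session_logical_reads", "session_writes"]);
current_snapshot
| project session_id, request_id, columns = pack_all()                  // fold the whole row into a property bag
| mv-apply column_label = column_labels to typeof(string) on (
    project column_label, column_value = tolong(columns[column_label])  // one row per labeled property
  )
| project-away columns
| sort by column_label asc                                              // make_list preserves this order
| project session_id, request_id, c = pack("Property", column_label, "Value", column_value)
| summarize details = dynamic_to_json(make_list(c)) by session_id, request_id

Each output row carries a JSON array like [{"Property":"session_logical_reads","Value":1200},{"Property":"session_writes","Value":40}], which the workbook grids consume directly as a data source.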
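The later hunks replace the templated grid tooltips with two query-computed columns, blocked_tooltip_text and blocker_tooltip_text, so sessions that are neither blocked nor blocking get an empty tooltip rather than a sentence with blank placeholders; the added formatter entries (formatter 5) appear to keep the helper columns out of the visible grid while the tooltipFormat settings reference them. The column logic in isolation, over hypothetical values:

print session_id = 53, head_blocking_session_id = int(null), count_blocked_sessions = 2
| extend blocked_tooltip_text = iif(isnull(head_blocking_session_id), "", strcat("Session ", tostring(session_id), " is blocked. The head blocker of the blocking chain is session ", tostring(head_blocking_session_id), ".")),
         blocker_tooltip_text = iif(count_blocked_sessions > 0, strcat("Session ", session_id, " is a head blocker in a blocking chain. It blocks ", count_blocked_sessions, " other session(s)."), "")
// This row yields an empty blocked_tooltip_text and a populated blocker_tooltip_text.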