From 21303983f9007895d935f4396087537c48917e44 Mon Sep 17 00:00:00 2001 From: Jesse S Date: Thu, 19 Oct 2023 16:04:08 -0700 Subject: [PATCH] TOOLS-2669 TOOLS-2683 TOOLS-2705 TOOLS-2706 7.0 config/stats changes (#218) * feat: TOOLS-2669 TOOLS-2683 7.0 changes Including changes to `show stop-writes` and `summary` as well as bug fixes to both. * feat: TOOLS-2669 handle new set level data_used_bytes metric * fix: TOOLS-2705 fix `info set` `Usage%` calculation * feat: TOOLS-2706 add `Records Quota` to `info set` cmd --- lib/collectinfo_analyzer/info_controller.py | 2 + lib/health/query.py | 142 +- lib/health/query/health.hql | 154 +- lib/live_cluster/info_controller.py | 12 +- lib/live_cluster/show_controller.py | 1 - lib/utils/common.py | 707 ++++--- lib/view/templates.py | 453 ++++- lib/view/view.py | 550 +++--- test/unit/utils/test_common.py | 1941 ++++++++++++++----- test/unit/view/test_view.py | 307 ++- 10 files changed, 3008 insertions(+), 1261 deletions(-) diff --git a/lib/collectinfo_analyzer/info_controller.py b/lib/collectinfo_analyzer/info_controller.py index aa8e8ec6..f2aeaba0 100644 --- a/lib/collectinfo_analyzer/info_controller.py +++ b/lib/collectinfo_analyzer/info_controller.py @@ -253,6 +253,7 @@ def _do_default(self, line): @CommandHelp("Displays usage information for each namespace") def do_usage(self, line): ns_stats = self.stat_getter.get_namespace() + service_stats = self.stat_getter.get_service() for timestamp in sorted(ns_stats.keys()): if not ns_stats[timestamp]: @@ -260,6 +261,7 @@ def do_usage(self, line): self.view.info_namespace_usage( ns_stats[timestamp], + service_stats[timestamp], self.log_handler.get_cinfo_log_at(timestamp=timestamp), timestamp=timestamp, **self.mods, diff --git a/lib/health/query.py b/lib/health/query.py index 43d32a2c..d14e8528 100644 --- a/lib/health/query.py +++ b/lib/health/query.py @@ -113,16 +113,31 @@ "Disk utilization Anomaly."); -avail=select like(".*available_pct") as "free_disk" from NAMESPACE.STATISTICS save; -disk_free = select "device_free_pct" as "free_disk", "free-pct-disk" as "free_disk" from NAMESPACE.STATISTICS save; -r = do disk_free - avail save as "fragmented blocks pct"; +SET CONSTRAINT VERSION < 7.0; +avail = select "device_available_pct" as "free_disk", "pmem_available_pct" as "free_disk", "available_pct" as "free_disk" from NAMESPACE.STATISTICS save; +data_free = select "device_free_pct" as "free_disk", "pmem_free_pct" as "free_disk", "free-pct-disk" as "free_disk" from NAMESPACE.STATISTICS save; +r = do data_free - avail save as "fragmented blocks pct"; r = do r <= 30; r = group by CLUSTER, NAMESPACE r; ASSERT(r, True, "High (> 30%) fragmented blocks.", "PERFORMANCE", WARNING, "Listed namespace[s] have higher than normal (>30%) fragmented blocks at the time of sampling. Please run 'show config namespace like defrag' to check defrag configurations. Possible cause can be Aerospike disk defragmentation not keeping up with write rate and/or large record sizes causing fragmentation. 
Refer to knowledge base article discuss.aerospike.com/t/defragmentation for more details.", "Fragmented Blocks check."); +/* +Same as above but beginning in 7.0 we must flip data_used to get data_free +*/ +SET CONSTRAINT VERSION >= 7.0; +data_avail = select "data_avail_pct" as "free_disk" from NAMESPACE.STATISTICS save; +data_used = select "data_used_pct" as "free_disk" from NAMESPACE.STATISTICS save; +data_free = do 100 - data_used; +r = do data_free - data_avail save as "fragmented blocks pct"; +r = do r <= 30; +r = group by CLUSTER, NAMESPACE r; +ASSERT(r, True, "High (> 30%) fragmented blocks.", "PERFORMANCE", WARNING, + "Listed namespace[s] have higher than normal (>30%) fragmented blocks at the time of sampling. Please run 'show config namespace like defrag' to check defrag configurations. Possible cause can be Aerospike disk defragmentation not keeping up with write rate and/or large record sizes causing fragmentation. Refer to knowledge base article discuss.aerospike.com/t/defragmentation for more details.", + "Fragmented Blocks check."); +SET CONSTRAINT VERSION ALL; s = select "%iowait" from SYSTEM.IOSTAT save; r = do s > 10; ASSERT(r, False, "High (> 10%) CPU IO wait time.", "PERFORMANCE", WARNING, @@ -188,6 +203,8 @@ "Listed node[s] have lower than normal (< 20%) system free memory percentage. Please run 'show statistics service like system_free_mem_pct' to get actual values. Possible misconfiguration.", "System memory percentage check."); +SET CONSTRAINT VERSION < 7.0; + f = select "memory_free_pct" as "stats", "free-pct-memory" as "stats" from NAMESPACE.STATISTICS save; s = select "stop-writes-pct" as "stats" from NAMESPACE.CONFIG save; u = do 100 - f save as "memory_used_pct"; @@ -215,8 +232,8 @@ ASSERT(e, True, "Namespace configured to use more than 256G.", "LIMITS", WARNING, "On listed nodes namespace as mentioned have configured more than 256G of memory. Namespace with data not in memory can have max upto 4 billion keys and can utilize only up to 256G. Please run 'show statistics namespace like memory-size' to check configured memory.", "Namespace per node memory limit check."); -SET CONSTRAINT VERSION ALL; +SET CONSTRAINT VERSION < 7.0; /* Following query selects assigned memory-size from namespace config and total ram size from system statistics. group by for namespace stats sums all memory size and gives node level memory size. @@ -237,7 +254,7 @@ "Listed node[s] have less than 5G free memory available for Aerospike runtime. Please run 'show statistics namespace like memory-size' to check configured memory and check output of 'free' for system memory. Possible misconfiguration.", "Runtime memory configuration check."); - +SET CONSTRAINT VERSION ALL; /* Current configurations and config file values difference check */ @@ -290,20 +307,53 @@ "Listed node[s] show higher than normal client-connections (> 80% of the max configured proto-fd-max). Please run 'show config like proto-fd-max' and 'show statistics like client_connections' for actual values. Possible can be network issue / improper client behavior / FD leak.", "Client connections check."); -s = select like(".*available_pct") as "stats" from NAMESPACE.STATISTICS save; -m = select like(".*min-avail-pct") as "stats" from NAMESPACE.CONFIG save; -critical_check = do s >= m; -ASSERT(critical_check, True, "Low namespace disk available pct (stop-write enabled).", "OPERATIONS", CRITICAL, - "Listed namespace[s] have lower than normal (< min-avail-pct) available disk space. 
Probable cause - namespace size misconfiguration.", - "Critical Namespace disk available pct check."); - -critical_check = do s < m; -r = do s >= 20; -r = do r || critical_check; -ASSERT(r, True, "Low namespace disk available pct.", "OPERATIONS", WARNING, +SET CONSTRAINT VERSION < 7.0.0; +free = select "device_free_pct" as "stats", "pmem_free_pct" as "stats" from NAMESPACE.STATISTICS save; +used = do 100 - free save as "storage-engine used pct"; +stop_used_pct = select "storage-engine.max-used-pct" as "stats" from NAMESPACE.CONFIG save; +critical = do used <= stop_used_pct; +ASSERT(critical, True, "High namespace storage-engine used pct (stop-write enabled).", "OPERATIONS", CRITICAL, + "Listed namespace[s] have higher than normal used storage-engine space. Probable cause - namespace size misconfiguration.", + "Critical Namespace storage-engine used pct check."); + +avail = select "device_available_pct" as "stats", "pmem_available_pct" as "stats" from NAMESPACE.STATISTICS save; +stop_min_avail = select "storage-engine.min-avail-pct" as "stats" from NAMESPACE.CONFIG save; +critical = do avail >= stop_min_avail; +ASSERT(critical, True, "Low namespace storage-engine available pct (stop-write enabled).", "OPERATIONS", CRITICAL, + "Listed namespace[s] have lower than normal available storage-engine space. Probable cause - namespace size misconfiguration.", + "Critical Namespace storage-engine available pct check."); + +skip = do critical == False; +warn = do avail >= 20; +warn = do warn || skip; +ASSERT(warn, True, "Low namespace disk available pct.", "OPERATIONS", WARNING, "Listed namespace[s] have lower than normal (< 20 %) available disk space. Probable cause - namespace size misconfiguration.", "Namespace disk available pct check."); +SET CONSTRAINT VERSION >= 7.0.0; +used = select "data_used_pct" as "stats" from NAMESPACE.STATISTICS save; +stop_used_pct = select "storage-engine.stop-writes-used-pct" as "stats" from NAMESPACE.CONFIG save; +critical = do used <= stop_used_pct; +ASSERT(critical, True, "High namespace storage-engine used pct (stop-write enabled).", "OPERATIONS", CRITICAL, + "Listed namespace[s] have higher than normal used storage-engine space. Probable cause - namespace size misconfiguration.", + "Critical Namespace storage-engine used pct check."); + +avail = select "data_avail_pct" as "stats" from NAMESPACE.STATISTICS save; +stop_avail_pct = select "storage-engine.stop-writes-avail-pct" as "stats" from NAMESPACE.CONFIG save; +critical = do avail >= stop_avail_pct; +ASSERT(critical, True, "Low namespace storage-engine available pct (stop-write enabled).", "OPERATIONS", CRITICAL, + "Listed namespace[s] have lower than normal available storage-engine space. Probable cause - namespace size misconfiguration.", + "Critical Namespace storage-engine available pct check."); + +skip = do critical == False; +warn = do avail >= 20; +warn = do warn || skip; +ASSERT(warn, True, "Low namespace storage-engine available pct.", "OPERATIONS", WARNING, + "Listed namespace[s] have lower than normal (< 20 %) available storage-engine space. 
Probable cause - namespace size misconfiguration.", + "Namespace storage-engine available pct check."); + +SET CONSTRAINT VERSION ALL; + s = select * from SERVICE.CONFIG ignore "heartbeat.mtu", "node-id-interface", "node-id", "pidfile", like(".*address"), like(".*port") save; r = group by CLUSTER, KEY do NO_MATCH(s, ==, MAJORITY) save; ASSERT(r, False, "Different service configurations.", "OPERATIONS", WARNING, @@ -357,19 +407,30 @@ ASSERT(r, False, "Device name misconfigured.", "OPERATIONS", WARNING, "Listed device[s] have partitions on same node. This might create situation like data corruption where data written to main drive gets overwritten/corrupted from data written to or deleted from the partition with the same name.", "Device name misconfiguration check."); - -s = select "device_total_bytes", "device-total-bytes", "total-bytes-disk" from NAMESPACE.STATISTICS save; + +s = select "data_total_bytes", "device_total_bytes", "pmem_total_bytes", "device-total-bytes", "total-bytes-disk" from NAMESPACE.STATISTICS save; r = group by CLUSTER, NAMESPACE do NO_MATCH(s, ==, MAJORITY) save; ASSERT(r, False, "Different namespace device size configuration.", "OPERATIONS", WARNING, "Listed namespace[s] have difference in configured disk size. Please run 'show statistics namespace like bytes' to check total device size. Probable cause - config file misconfiguration.", "Namespace device size configuration difference check."); +SET CONSTRAINT VERSION < 4.9; hwm = select "high-water-disk-pct" from NAMESPACE.CONFIG save; hwm = group by CLUSTER, NAMESPACE hwm; r = do hwm == 50; -ASSERT(r, True, "Non-default namespace device high water mark configuration.", "OPERATIONS", INFO, - "Listed namespace[s] have non-default high water mark configuration. Please run 'show config namespace like high-water-disk-pct' to check value. Probable cause - config file misconfiguration.", - "Non-default namespace device high water mark check."); +ASSERT(r, True, "Non-default namespace storage-engine eviction threshold configuration.", "OPERATIONS", INFO, + "Listed namespace[s] have non-default eviction threshold configuration. Please run 'show config namespace like high-water evict-used' to check value. Probable cause - config file misconfiguration.", + "Non-default namespace storage-engine eviction threshold check."); + +SET CONSTRAINT VERSION >= 4.9; +hwm = select "high-water-disk-pct", "storage-engine.evict-used-pct" from NAMESPACE.CONFIG save; +hwm = group by CLUSTER, NAMESPACE hwm; +r = do hwm == 0; +ASSERT(r, True, "Non-default namespace storage-engine eviction threshold configuration.", "OPERATIONS", INFO, + "Listed namespace[s] have non-default eviction threshold configuration. Please run 'show config namespace like high-water evict-used' to check value. Probable cause - config file misconfiguration.", + "Non-default namespace storage-engine eviction threshold check."); + +SET CONSTRAINT VERSION ALL; lwm = select like(".*defrag-lwm-pct") from NAMESPACE.CONFIG save; lwm = group by CLUSTER, NAMESPACE lwm; @@ -378,12 +439,12 @@ "Listed namespace[s] have non-default low water mark configuration. 
Probable cause - config file misconfiguration.", "Non-default namespace device low water mark check."); -hwm = select "high-water-disk-pct" as "defrag-lwm-pct" from NAMESPACE.CONFIG save; +hwm = select "high-water-disk-pct" as "defrag-lwm-pct", "storage-engine.evict-used-pct" as "defrag-lwm-pct" from NAMESPACE.CONFIG save; lwm = select like(".*defrag-lwm-pct") as "defrag-lwm-pct" from NAMESPACE.CONFIG save; r = do lwm < hwm on common; r = group by CLUSTER, NAMESPACE r; ASSERT(r, False, "Defrag low water mark misconfigured.", "OPERATIONS", WARNING, - "Listed namespace[s] have defrag-lwm-pct lower than high-water-disk-pct. This might create situation like no block to write, no eviction and no defragmentation. Please run 'show config namespace like high-water-disk-pct defrag-lwm-pct' to check configured values. Probable cause - namespace watermark misconfiguration.", + "Listed namespace[s] have defrag-lwm-pct lower than eviction threshold. This might create situation like no block to write, no eviction and no defragmentation. Please run 'show config namespace like high-water evict-used defrag-lwm-pct' to check configured values. Probable cause - namespace watermark misconfiguration.", "Defrag low water mark misconfiguration check."); commit_to_device = select "storage-engine.commit-to-device" from NAMESPACE.CONFIG; @@ -434,21 +495,23 @@ with available space per node per namespace. */ -t = select "device_total_bytes" as "disk_space", "device-total-bytes" as "disk_space", "total-bytes-disk" as "disk_space" from NAMESPACE.STATISTICS; -u = select "used-bytes-disk" as "disk_space", "device_used_bytes" as "disk_space" from NAMESPACE.STATISTICS; +t = select "data_total_bytes" as "disk_space", "pmem_total_bytes" as "disk_space", "device_total_bytes" as "disk_space", "device-total-bytes" as "disk_space", "total-bytes-disk" as "disk_space" from NAMESPACE.STATISTICS; +u = select "data_used_bytes" as "disk_space", "pmem_used_bytes" as "disk_space", "device_used_bytes" as "disk_space", "used-bytes-disk" as "disk_space" from NAMESPACE.STATISTICS; /* Available extra space */ e = do t - u; -e = group by CLUSTER, NAMESPACE, NODE do SUM(e) save as "available device space"; +e = group by CLUSTER, NAMESPACE, NODE do SUM(e) save as "available storage space"; s = select "cluster_size" as "size" from SERVICE; n = do MAX(s); n = do n - 1; /* Extra space need if 1 node goes down */ e1 = do u / n; -e1 = group by CLUSTER, NAMESPACE do MAX(e1) save as "distribution share of used device space per node"; +e1 = group by CLUSTER, NAMESPACE do MAX(e1) save as "distribution share of used storage space per node"; r = do e > e1; -ASSERT(r, True, "Namespace under configured (disk) for single node failure.", "OPERATIONS", WARNING, - "Listed namespace[s] does not have enough disk space configured to deal with increase in data per node in case of 1 node failure. Please run 'show statistics namespace like bytes' to check device space. It is non-issue if single replica limit is set to larger values, i.e if number of replica copies are reduced in case of node loss.", - "Namespace single node failure disk config check."); +ASSERT(r, False, "Namespace storage under configured for single node failure.", "OPERATIONS", WARNING, + "Listed namespace[s] does not have enough storage space configured to deal with increase in data per node in case of 1 node failure. Please run 'show statistics namespace like bytes' to check storage space. 
It is a non-issue if single replica limit is set to larger values, i.e. if the number of replica copies is reduced in case of node loss.",
+    "Namespace single node failure storage space config check.");
+
+SET CONSTRAINT VERSION < 7.0.0;
 
 /*
 Same as above query but for memory
 */
@@ -1440,8 +1503,7 @@
 // Secondary Index Aggregation Query Statistics, formerly Query Agg statistics
 s = select "si_query_aggr_complete" as "val" from NAMESPACE.STATISTICS save;
 e = select "si_query_aggr_error" as "val" from NAMESPACE.STATISTICS save;
-total_transactions = do s + e;
-total_transaction = do total_transactions + a save as "total sindex query aggregations";
+total_transactions = do s + e save as "total sindex query aggregations";
 total_transactions_per_sec = do total_transactions/u;
 total_transactions_per_sec = group by CLUSTER, NAMESPACE, NODE do MAX(total_transactions_per_sec);
@@ -1557,7 +1619,7 @@
 // Scan Basic statistics
 s = select "scan_basic_complete" as "cnt" from NAMESPACE.STATISTICS;
-e = select "scan_basic_error", as "cnt" from NAMESPACE.STATISTICS;
+e = select "scan_basic_error" as "cnt" from NAMESPACE.STATISTICS;
 total_transactions = do s + e save as "total basic scans";
 total_transactions_per_sec = do total_transactions/u;
 total_transactions_per_sec = group by CLUSTER, NAMESPACE, NODE do MAX(total_transactions_per_sec);
@@ -1937,7 +1999,7 @@
 cluster_size = group by CLUSTER do MAX(cluster_size) save as "cluster-size";
 repl = select "effective_replication_factor" as "sprig_limit_critical" from NAMESPACE.STATISTICS save as "effective_repl_factor";
 pts = select "partition-tree-sprigs" as "sprig_limit_critical" from NAMESPACE.CONFIG save as "partition-tree-sprigs";
-size_limit = select "index-type.mounts-size-limit" as "sprig_limit_critical" from NAMESPACE.CONFIG;
+size_limit = select "index-type.mounts-size-limit" as "sprig_limit_critical", "index-type.mounts-budget" as "sprig_limit_critical" from NAMESPACE.CONFIG;
 // below statement adds thousand delimiter to mounts-size-limiter when it prints
 size_limit = do size_limit * 1 save as "mounts-size-limit";
@@ -1958,11 +2020,12 @@
 num_partitions = do 4096 * repl;
 partitions_per_node = do num_partitions/cluster_size;
 pts_per_node = do partitions_per_node * pts;
+// 4K partition-tree-sprig overhead
 total_pts = do pts_per_node * 4096 save as "Minimum space required";
 result = do total_pts > size_limit;
 ASSERT(result, False, "ALL FLASH - Too many sprigs per partition for current available index mounted space. Some records are likely failing to be created.", "OPERATIONS", CRITICAL,
-    "Minimum space required for sprig overhead at current cluster size exceeds mounts-size-limit.
+    "Minimum space required for sprig overhead at current cluster size exceeds index-mount size.
See: https://www.aerospike.com/docs/operations/configure/namespace/index/#flash-index and https://www.aerospike.com/docs/operations/plan/capacity/#aerospike-all-flash", "Check for too many sprigs for current cluster size.", dont_skip); @@ -1973,9 +2036,9 @@ mcs = group by CLUSTER do MAX(mcs) save as "min-cluster-size"; repl = select "replication-factor" as "sprig_limit_warning" from NAMESPACE.STATISTICS; pts = select "partition-tree-sprigs" as "sprig_limit_warning" from NAMESPACE.CONFIG; -msl = select "index-type.mounts-size-limit" as "sprig_limit_warning" from NAMESPACE.CONFIG; +msl = select "index-type.mounts-size-limit" as "sprig_limit_warning", "index-type.mounts-budget" as "sprig_limit_warning" from NAMESPACE.CONFIG; // below statement adds thousand delimiter to mounts-size-limiter when it prints -msl = do msl * 1 save as "mounts-size-limit"; +msl = do msl * 1 save; // calculate sprig overhead // The replication factor should be min(repl, mcs) @@ -1989,7 +2052,7 @@ e1 = do repl_smaller && dont_skip; ASSERT(r1, False, "ALL FLASH - Too many sprigs per partition for configured min-cluster-size.", "OPERATIONS", WARNING, - "Minimum space required for sprig overhead at min-cluster-size exceeds mounts-size-limit. + "Minimum space required for sprig overhead at min-cluster-size exceeds index-mount size. See: https://www.aerospike.com/docs/operations/configure/namespace/index/#flash-index and https://www.aerospike.com/docs/operations/plan/capacity/#aerospike-all-flash", "Check for too many sprigs for minimum cluster size.", e1); @@ -1998,6 +2061,7 @@ // Only is asserted if min-cluster-size is smaller than replication-factor. // r2 = do 4096 * mcs; // r2 = do r2/mcs; +// 4096 * mcs / mcs = 4096 r2 = 4096; r2 = do r2 * pts; r2 = do r2 * 4096 save as "Minimum space required"; @@ -2007,7 +2071,7 @@ e2 = do mcs_smaller && dont_skip; ASSERT(r2, False, "ALL FLASH - Too many sprigs per partition for configured min-cluster-size.", "OPERATIONS", WARNING, - "Minimum space required for sprig overhead at min-cluster-size exceeds mounts-size-limit. + "Minimum space required for sprig overhead at min-cluster-size exceeds index-mount size. See: https://www.aerospike.com/docs/operations/configure/namespace/index/#flash-index and https://www.aerospike.com/docs/operations/plan/capacity/#aerospike-all-flash", "Check for too many sprigs for minimum cluster size.", e2); diff --git a/lib/health/query/health.hql b/lib/health/query/health.hql index ebc7d739..69a6e37d 100644 --- a/lib/health/query/health.hql +++ b/lib/health/query/health.hql @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
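#
# Each group of checks in this file is bracketed by SET CONSTRAINT VERSION so
# it only evaluates against nodes whose server version matches, and
# SET CONSTRAINT VERSION ALL restores the default. A minimal sketch of the
# pattern (the metric shown is the 7.0 data_used_pct statistic this patch
# adopts):
#
#   SET CONSTRAINT VERSION >= 7.0;
#   used = select "data_used_pct" as "stats" from NAMESPACE.STATISTICS save;
#   SET CONSTRAINT VERSION ALL;
#
# Pre-7.0 blocks select the older per-storage names (device_*, pmem_*) behind
# SET CONSTRAINT VERSION < 7.0, which is why most hunks below come in pairs.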
-QUERIES = """ /*************************************************** * System Resource * ****************************************************/ @@ -113,16 +112,31 @@ ASSERT(r1, False, "Skewed cluster disk utilization.", "ANOMALY", WARNING, "Disk utilization Anomaly."); -avail=select like(".*available_pct") as "free_disk" from NAMESPACE.STATISTICS save; -disk_free = select "device_free_pct" as "free_disk", "free-pct-disk" as "free_disk" from NAMESPACE.STATISTICS save; -r = do disk_free - avail save as "fragmented blocks pct"; +SET CONSTRAINT VERSION < 7.0; +avail = select "device_available_pct" as "free_disk", "pmem_available_pct" as "free_disk", "available_pct" as "free_disk" from NAMESPACE.STATISTICS save; +data_free = select "device_free_pct" as "free_disk", "pmem_free_pct" as "free_disk", "free-pct-disk" as "free_disk" from NAMESPACE.STATISTICS save; +r = do data_free - avail save as "fragmented blocks pct"; r = do r <= 30; r = group by CLUSTER, NAMESPACE r; ASSERT(r, True, "High (> 30%) fragmented blocks.", "PERFORMANCE", WARNING, "Listed namespace[s] have higher than normal (>30%) fragmented blocks at the time of sampling. Please run 'show config namespace like defrag' to check defrag configurations. Possible cause can be Aerospike disk defragmentation not keeping up with write rate and/or large record sizes causing fragmentation. Refer to knowledge base article discuss.aerospike.com/t/defragmentation for more details.", "Fragmented Blocks check."); +/* +Same as above but beginning in 7.0 we must flip data_used to get data_free +*/ +SET CONSTRAINT VERSION >= 7.0; +data_avail = select "data_avail_pct" as "free_disk" from NAMESPACE.STATISTICS save; +data_used = select "data_used_pct" as "free_disk" from NAMESPACE.STATISTICS save; +data_free = do 100 - data_used; +r = do data_free - data_avail save as "fragmented blocks pct"; +r = do r <= 30; +r = group by CLUSTER, NAMESPACE r; +ASSERT(r, True, "High (> 30%) fragmented blocks.", "PERFORMANCE", WARNING, + "Listed namespace[s] have higher than normal (>30%) fragmented blocks at the time of sampling. Please run 'show config namespace like defrag' to check defrag configurations. Possible cause can be Aerospike disk defragmentation not keeping up with write rate and/or large record sizes causing fragmentation. Refer to knowledge base article discuss.aerospike.com/t/defragmentation for more details.", + "Fragmented Blocks check."); +SET CONSTRAINT VERSION ALL; s = select "%iowait" from SYSTEM.IOSTAT save; r = do s > 10; ASSERT(r, False, "High (> 10%) CPU IO wait time.", "PERFORMANCE", WARNING, @@ -188,6 +202,8 @@ ASSERT(r, False, "Low system memory percentage.", "LIMITS", CRITICAL, "Listed node[s] have lower than normal (< 20%) system free memory percentage. Please run 'show statistics service like system_free_mem_pct' to get actual values. 
Possible misconfiguration.", "System memory percentage check."); +SET CONSTRAINT VERSION < 7.0; + f = select "memory_free_pct" as "stats", "free-pct-memory" as "stats" from NAMESPACE.STATISTICS save; s = select "stop-writes-pct" as "stats" from NAMESPACE.CONFIG save; u = do 100 - f save as "memory_used_pct"; @@ -198,6 +214,7 @@ ASSERT(r, True, "Low namespace memory available pct (stop-write enabled).", "OPE /* NB : ADD CHECKS IF NODES ARE NOT HOMOGENOUS MEM / NUM CPU etc */ +SET CONSTRAINT VERSION ALL; s = select "available_bin_names", "available-bin-names" from NAMESPACE save; r = group by NAMESPACE do s > 3200; @@ -214,8 +231,8 @@ e = do r <= 274877906944; ASSERT(e, True, "Namespace configured to use more than 256G.", "LIMITS", WARNING, "On listed nodes namespace as mentioned have configured more than 256G of memory. Namespace with data not in memory can have max upto 4 billion keys and can utilize only up to 256G. Please run 'show statistics namespace like memory-size' to check configured memory.", "Namespace per node memory limit check."); -SET CONSTRAINT VERSION ALL; +SET CONSTRAINT VERSION < 7.0; /* Following query selects assigned memory-size from namespace config and total ram size from system statistics. group by for namespace stats sums all memory size and gives node level memory size. @@ -236,7 +253,7 @@ ASSERT(r, True, "Aerospike runtime memory configured < 5G.", "LIMITS", INFO, "Listed node[s] have less than 5G free memory available for Aerospike runtime. Please run 'show statistics namespace like memory-size' to check configured memory and check output of 'free' for system memory. Possible misconfiguration.", "Runtime memory configuration check."); - +SET CONSTRAINT VERSION ALL; /* Current configurations and config file values difference check */ @@ -289,20 +306,53 @@ ASSERT(r, False, "High system client connections.", "OPERATIONS", WARNING, "Listed node[s] show higher than normal client-connections (> 80% of the max configured proto-fd-max). Please run 'show config like proto-fd-max' and 'show statistics like client_connections' for actual values. Possible can be network issue / improper client behavior / FD leak.", "Client connections check."); -s = select like(".*available_pct") as "stats" from NAMESPACE.STATISTICS save; -m = select like(".*min-avail-pct") as "stats" from NAMESPACE.CONFIG save; -critical_check = do s >= m; -ASSERT(critical_check, True, "Low namespace disk available pct (stop-write enabled).", "OPERATIONS", CRITICAL, - "Listed namespace[s] have lower than normal (< min-avail-pct) available disk space. Probable cause - namespace size misconfiguration.", - "Critical Namespace disk available pct check."); - -critical_check = do s < m; -r = do s >= 20; -r = do r || critical_check; -ASSERT(r, True, "Low namespace disk available pct.", "OPERATIONS", WARNING, +SET CONSTRAINT VERSION < 7.0.0; +free = select "device_free_pct" as "stats", "pmem_free_pct" as "stats" from NAMESPACE.STATISTICS save; +used = do 100 - free save as "storage-engine used pct"; +stop_used_pct = select "storage-engine.stop-writes-used-pct" as "stats", "storage-engine.max-used-pct" as "stats" from NAMESPACE.CONFIG save; +critical = do used <= stop_used_pct; +ASSERT(critical, True, "High namespace storage-engine used pct (stop-write enabled).", "OPERATIONS", CRITICAL, + "Listed namespace[s] have higher than normal used storage-engine space. 
Probable cause - namespace size misconfiguration.", + "Critical Namespace storage-engine used pct check."); + +avail = select "device_available_pct" as "stats", "pmem_available_pct" as "stats" from NAMESPACE.STATISTICS save; +stop_min_avail = select "storage-engine.min-avail-pct" as "stats" from NAMESPACE.CONFIG save; +critical = do avail >= stop_min_avail; +ASSERT(critical, True, "Low namespace storage-engine available pct (stop-write enabled).", "OPERATIONS", CRITICAL, + "Listed namespace[s] have lower than normal available storage-engine space. Probable cause - namespace size misconfiguration.", + "Critical Namespace storage-engine available pct check."); + +skip = do critical == False; +warn = do avail >= 20; +warn = do warn || skip; +ASSERT(warn, True, "Low namespace disk available pct.", "OPERATIONS", WARNING, "Listed namespace[s] have lower than normal (< 20 %) available disk space. Probable cause - namespace size misconfiguration.", "Namespace disk available pct check."); +SET CONSTRAINT VERSION >= 7.0.0; +used = select "data_used_pct" as "stats" from NAMESPACE.STATISTICS save; +stop_used_pct = select "storage-engine.stop-writes-used-pct" as "stats" from NAMESPACE.CONFIG save; +critical = do used <= stop_used_pct; +ASSERT(critical, True, "High namespace storage-engine used pct (stop-write enabled).", "OPERATIONS", CRITICAL, + "Listed namespace[s] have higher than normal used storage-engine space. Probable cause - namespace size misconfiguration.", + "Critical Namespace storage-engine used pct check."); + +avail = select "data_avail_pct" as "stats" from NAMESPACE.STATISTICS save; +stop_avail_pct = select "storage-engine.stop-writes-avail-pct" as "stats" from NAMESPACE.CONFIG save; +critical = do avail >= stop_avail_pct; +ASSERT(critical, True, "Low namespace storage-engine available pct (stop-write enabled).", "OPERATIONS", CRITICAL, + "Listed namespace[s] have lower than normal available storage-engine space. Probable cause - namespace size misconfiguration.", + "Critical Namespace storage-engine available pct check."); + +skip = do critical == False; +warn = do avail >= 20; +warn = do warn || skip; +ASSERT(warn, True, "Low namespace storage-engine available pct.", "OPERATIONS", WARNING, + "Listed namespace[s] have lower than normal (< 20 %) available storage-engine space. Probable cause - namespace size misconfiguration.", + "Namespace storage-engine available pct check."); + +SET CONSTRAINT VERSION ALL; + s = select * from SERVICE.CONFIG ignore "heartbeat.mtu", "node-id-interface", "node-id", "pidfile", like(".*address"), like(".*port") save; r = group by CLUSTER, KEY do NO_MATCH(s, ==, MAJORITY) save; ASSERT(r, False, "Different service configurations.", "OPERATIONS", WARNING, @@ -312,7 +362,7 @@ ASSERT(r, False, "Different service configurations.", "OPERATIONS", WARNING, multicast_mode_enabled = select like(".*mode") from NETWORK.CONFIG; multicast_mode_enabled = do multicast_mode_enabled == "multicast"; multicast_mode_enabled = group by CLUSTER, NODE do OR(multicast_mode_enabled); -s = select like(".*mtu") from SERVICE.CONFIG save; +s = select like(".*mtu") from NETWORK.CONFIG save; r = group by CLUSTER do NO_MATCH(s, ==, MAJORITY) save; ASSERT(r, False, "Different heartbeat.mtu.", "OPERATIONS", WARNING, "Listed node[s] have a different heartbeat.mtu configured. A multicast packet can only be as large as the interface mtu. Different mtu values might create cluster stability issue. 
Please contact Aerospike Support team.", @@ -356,19 +406,30 @@ r = do APPLY_TO_ANY(d, IN, f); ASSERT(r, False, "Device name misconfigured.", "OPERATIONS", WARNING, "Listed device[s] have partitions on same node. This might create situation like data corruption where data written to main drive gets overwritten/corrupted from data written to or deleted from the partition with the same name.", "Device name misconfiguration check."); - -s = select "device_total_bytes", "device-total-bytes", "total-bytes-disk" from NAMESPACE.STATISTICS save; + +s = select "data_total_bytes", "device_total_bytes", "pmem_total_bytes", "device-total-bytes", "total-bytes-disk" from NAMESPACE.STATISTICS save; r = group by CLUSTER, NAMESPACE do NO_MATCH(s, ==, MAJORITY) save; ASSERT(r, False, "Different namespace device size configuration.", "OPERATIONS", WARNING, "Listed namespace[s] have difference in configured disk size. Please run 'show statistics namespace like bytes' to check total device size. Probable cause - config file misconfiguration.", "Namespace device size configuration difference check."); +SET CONSTRAINT VERSION < 4.9; hwm = select "high-water-disk-pct" from NAMESPACE.CONFIG save; hwm = group by CLUSTER, NAMESPACE hwm; r = do hwm == 50; -ASSERT(r, True, "Non-default namespace device high water mark configuration.", "OPERATIONS", INFO, - "Listed namespace[s] have non-default high water mark configuration. Please run 'show config namespace like high-water-disk-pct' to check value. Probable cause - config file misconfiguration.", - "Non-default namespace device high water mark check."); +ASSERT(r, True, "Non-default namespace storage-engine eviction threshold configuration.", "OPERATIONS", INFO, + "Listed namespace[s] have non-default eviction threshold configuration. Please run 'show config namespace like high-water evict-used' to check value. Probable cause - config file misconfiguration.", + "Non-default namespace storage-engine eviction threshold check."); + +SET CONSTRAINT VERSION >= 4.9; +hwm = select "high-water-disk-pct", "storage-engine.evict-used-pct" from NAMESPACE.CONFIG save; +hwm = group by CLUSTER, NAMESPACE hwm; +r = do hwm == 0; +ASSERT(r, True, "Non-default namespace storage-engine eviction threshold configuration.", "OPERATIONS", INFO, + "Listed namespace[s] have non-default eviction threshold configuration. Please run 'show config namespace like high-water evict-used' to check value. Probable cause - config file misconfiguration.", + "Non-default namespace storage-engine eviction threshold check."); + +SET CONSTRAINT VERSION ALL; lwm = select like(".*defrag-lwm-pct") from NAMESPACE.CONFIG save; lwm = group by CLUSTER, NAMESPACE lwm; @@ -377,12 +438,12 @@ ASSERT(r, True, "Non-default namespace device low water mark configuration.", "O "Listed namespace[s] have non-default low water mark configuration. Probable cause - config file misconfiguration.", "Non-default namespace device low water mark check."); -hwm = select "high-water-disk-pct" as "defrag-lwm-pct" from NAMESPACE.CONFIG save; +hwm = select "high-water-disk-pct" as "defrag-lwm-pct", "storage-engine.evict-used-pct" as "defrag-lwm-pct" from NAMESPACE.CONFIG save; lwm = select like(".*defrag-lwm-pct") as "defrag-lwm-pct" from NAMESPACE.CONFIG save; r = do lwm < hwm on common; r = group by CLUSTER, NAMESPACE r; ASSERT(r, False, "Defrag low water mark misconfigured.", "OPERATIONS", WARNING, - "Listed namespace[s] have defrag-lwm-pct lower than high-water-disk-pct. 
This might create situation like no block to write, no eviction and no defragmentation. Please run 'show config namespace like high-water-disk-pct defrag-lwm-pct' to check configured values. Probable cause - namespace watermark misconfiguration.",
+    "Listed namespace[s] have defrag-lwm-pct lower than eviction threshold. This might create a situation like no block to write, no eviction and no defragmentation. Please run 'show config namespace like high-water evict-used defrag-lwm-pct' to check configured values. Probable cause - namespace watermark misconfiguration.",
     "Defrag low water mark misconfiguration check.");
 
 commit_to_device = select "storage-engine.commit-to-device" from NAMESPACE.CONFIG;
@@ -407,13 +468,13 @@ ASSERT(r, True, "Number of Sets equal to or above 750", "LIMITS", INFO,
 stop_writes = select "stop_writes" from NAMESPACE.STATISTICS;
 stop_writes = group by CLUSTER, NAMESPACE stop_writes;
 ASSERT(stop_writes, False, "Namespace has hit stop-writes (stop_writes = true)", "OPERATIONS" , CRITICAL,
-    "Listed namespace(s) have hit stop-write. Please run 'show statistics namespace like stop_writes' for details.",
+    "Listed namespace(s) have hit stop-write. Please run 'show stop-writes' for details.",
     "Namespace stop-writes flag check.");
 
 clock_skew_stop_writes = select "clock_skew_stop_writes" from NAMESPACE.STATISTICS;
 clock_skew_stop_writes = group by CLUSTER, NAMESPACE clock_skew_stop_writes;
 ASSERT(clock_skew_stop_writes, False, "Namespace has hit clock-skew-stop-writes (clock_skew_stop_writes = true)", "OPERATIONS" , CRITICAL,
-    "Listed namespace(s) have hit clock-skew-stop-writes. Please run 'show statistics namespace like clock_skew_stop_writes' for details.",
+    "Listed namespace(s) have hit clock-skew-stop-writes. Please run 'show stop-writes' for details.",
     "Namespace clock-skew-stop-writes flag check.");
 
 SET CONSTRAINT VERSION < 4.3;
@@ -433,21 +494,23 @@ It collects cluster-size and uses it to find out expected data distribution for
 with available space per node per namespace.
 */
-t = select "device_total_bytes" as "disk_space", "device-total-bytes" as "disk_space", "total-bytes-disk" as "disk_space" from NAMESPACE.STATISTICS;
-u = select "used-bytes-disk" as "disk_space", "device_used_bytes" as "disk_space" from NAMESPACE.STATISTICS;
+t = select "data_total_bytes" as "disk_space", "pmem_total_bytes" as "disk_space", "device_total_bytes" as "disk_space", "device-total-bytes" as "disk_space", "total-bytes-disk" as "disk_space" from NAMESPACE.STATISTICS;
+u = select "data_used_bytes" as "disk_space", "pmem_used_bytes" as "disk_space", "device_used_bytes" as "disk_space", "used-bytes-disk" as "disk_space" from NAMESPACE.STATISTICS;
 
 /*
 Available extra space
 */
 e = do t - u;
-e = group by CLUSTER, NAMESPACE, NODE do SUM(e) save as "available device space";
+e = group by CLUSTER, NAMESPACE, NODE do SUM(e) save as "available storage space";
 
 s = select "cluster_size" as "size" from SERVICE;
 n = do MAX(s);
 n = do n - 1;
 
 /*
 Extra space need if 1 node goes down
 */
 e1 = do u / n;
-e1 = group by CLUSTER, NAMESPACE do MAX(e1) save as "distribution share of used device space per node";
+e1 = group by CLUSTER, NAMESPACE do MAX(e1) save as "distribution share of used storage space per node";
 
 r = do e > e1;
-ASSERT(r, True, "Namespace under configured (disk) for single node failure.", "OPERATIONS", WARNING,
-    "Listed namespace[s] does not have enough disk space configured to deal with increase in data per node in case of 1 node failure. Please run 'show statistics namespace like bytes' to check device space. It is non-issue if single replica limit is set to larger values, i.e if number of replica copies are reduced in case of node loss.",
-    "Namespace single node failure disk config check.");
+ASSERT(r, False, "Namespace storage under configured for single node failure.", "OPERATIONS", WARNING,
+    "Listed namespace[s] does not have enough storage space configured to deal with increase in data per node in case of 1 node failure. Please run 'show statistics namespace like bytes' to check storage space. It is a non-issue if single replica limit is set to larger values, i.e. if the number of replica copies is reduced in case of node loss.",
+    "Namespace single node failure storage space config check.");
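/*
Worked example of the check above with hypothetical per-node numbers: with
t = 1000 GB total and u = 700 GB used on each node of a 5-node cluster,
headroom is e = t - u = 300 GB, while losing one node redistributes roughly
e1 = u / (cluster_size - 1) = 700 / 4 = 175 GB onto each survivor, so the
headroom comfortably covers the redistributed share.
*/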
+
+SET CONSTRAINT VERSION < 7.0.0;
 
 /*
 Same as above query but for memory
 */
@@ -609,7 +672,7 @@ u = group by CLUSTER, NODE do MAX(u);
 s = do s / u on common;
 r = group by CLUSTER, DC, KEY do SD_ANOMALY(s, ==, 3);
 ASSERT(r, False, "Skewed cluster remote DC statistics.", "ANOMALY", WARNING,
-    "Listed DC statistic[s] show skew for the listed node[s]. Please run 'show statistics dc' to get all DC stats. May be non-issue if remote Data center connectivity behavior for nodes is not same.",
+    "Listed DC statistic[s] show skew for the listed node[s]. Please run 'show statistics dc' to get all DC stats. May be non-issue if remote Data center connectivity behavior for nodes is not same.",
     "Remote DC statistics anomaly check.");
 
 /*
@@ -1439,8 +1502,7 @@ ASSERT(r, True, "Non-zero sindex basic short query errors", "OPERATIONS", INFO,
 // Secondary Index Aggregation Query Statistics, formerly Query Agg statistics
 s = select "si_query_aggr_complete" as "val" from NAMESPACE.STATISTICS save;
 e = select "si_query_aggr_error" as "val" from NAMESPACE.STATISTICS save;
-total_transactions = do s + e;
-total_transaction = do total_transactions + a save as "total sindex query aggregations";
+total_transactions = do s + e save as "total sindex query aggregations";
 total_transactions_per_sec = do total_transactions/u;
 total_transactions_per_sec = group by CLUSTER, NAMESPACE, NODE do MAX(total_transactions_per_sec);
@@ -1556,7 +1618,7 @@ ASSERT(r, True, "Non-zero scan aggregation errors", "OPERATIONS", INFO,
 // Scan Basic statistics
 s = select "scan_basic_complete" as "cnt" from NAMESPACE.STATISTICS;
-e = select "scan_basic_error", as "cnt" from NAMESPACE.STATISTICS;
+e = select "scan_basic_error" as "cnt" from NAMESPACE.STATISTICS;
 total_transactions = do s + e save as "total basic scans";
 total_transactions_per_sec = do total_transactions/u;
 total_transactions_per_sec = group by CLUSTER, NAMESPACE, NODE do MAX(total_transactions_per_sec);
@@ -1936,7 +1998,7 @@ cluster_size = select "cluster_size" as "sprig_limit_critical" from SERVICE.STAT
 cluster_size = group by CLUSTER do MAX(cluster_size) save as "cluster-size";
 repl = select "effective_replication_factor" as "sprig_limit_critical" from NAMESPACE.STATISTICS save as "effective_repl_factor";
 pts = select "partition-tree-sprigs" as "sprig_limit_critical" from NAMESPACE.CONFIG save as "partition-tree-sprigs";
-size_limit = select "index-type.mounts-size-limit" as "sprig_limit_critical" from NAMESPACE.CONFIG;
+size_limit = select "index-type.mounts-size-limit" as "sprig_limit_critical", "index-type.mounts-budget" as "sprig_limit_critical" from NAMESPACE.CONFIG;
 // below statement adds thousand delimiter to mounts-size-limiter when it prints
 size_limit = do size_limit * 1 save as "mounts-size-limit";
@@ -1957,11 +2019,12 @@ dont_skip = group by CLUSTER, NODE, NAMESPACE do OR(dont_skip);
 num_partitions = do 4096 * repl;
 partitions_per_node = do num_partitions/cluster_size;
 pts_per_node = do partitions_per_node * pts;
+// 4K partition-tree-sprig overhead
 total_pts = do pts_per_node * 4096 save as "Minimum space required";
 result = do total_pts > size_limit;
 ASSERT(result, False, "ALL FLASH - Too many sprigs per partition for current available index mounted space. Some records are likely failing to be created.", "OPERATIONS", CRITICAL,
-    "Minimum space required for sprig overhead at current cluster size exceeds mounts-size-limit.
+    "Minimum space required for sprig overhead at current cluster size exceeds mounts-budget/mounts-size-limit.
 See: https://www.aerospike.com/docs/operations/configure/namespace/index/#flash-index and https://www.aerospike.com/docs/operations/plan/capacity/#aerospike-all-flash",
     "Check for too many sprigs for current cluster size.", dont_skip);
@@ -1972,9 +2035,9 @@ mcs = select "min-cluster-size" as "sprig_limit_warning" from SERVICE;
 mcs = group by CLUSTER do MAX(mcs) save as "min-cluster-size";
 repl = select "replication-factor" as "sprig_limit_warning" from NAMESPACE.STATISTICS;
 pts = select "partition-tree-sprigs" as "sprig_limit_warning" from NAMESPACE.CONFIG;
-msl = select "index-type.mounts-size-limit" as "sprig_limit_warning" from NAMESPACE.CONFIG;
+msl = select "index-type.mounts-size-limit" as "sprig_limit_warning", "index-type.mounts-budget" as "sprig_limit_warning" from NAMESPACE.CONFIG;
 // below statement adds thousand delimiter to mounts-size-limiter when it prints
-msl = do msl * 1 save as "mounts-size-limit";
+msl = do msl * 1 save;
 
 // calculate sprig overhead
 // The replication factor should be min(repl, mcs)
@@ -1988,7 +2051,7 @@ repl_smaller = do repl < mcs;
 e1 = do repl_smaller && dont_skip;
 ASSERT(r1, False, "ALL FLASH - Too many sprigs per partition for configured min-cluster-size.", "OPERATIONS", WARNING,
-    "Minimum space required for sprig overhead at min-cluster-size exceeds mounts-size-limit.
+    "Minimum space required for sprig overhead at min-cluster-size exceeds index-mount size.
 See: https://www.aerospike.com/docs/operations/configure/namespace/index/#flash-index and https://www.aerospike.com/docs/operations/plan/capacity/#aerospike-all-flash",
     "Check for too many sprigs for minimum cluster size.", e1);
@@ -1997,6 +2060,7 @@ ASSERT(r1, False, "ALL FLASH - Too many sprigs per partition for configured min-
 // Only is asserted if min-cluster-size is smaller than replication-factor.
 // r2 = do 4096 * mcs;
 // r2 = do r2/mcs;
+// 4096 * mcs / mcs = 4096
 r2 = 4096;
 r2 = do r2 * pts;
 r2 = do r2 * 4096 save as "Minimum space required";
@@ -2006,7 +2070,7 @@ mcs_smaller = do mcs <= repl;
 e2 = do mcs_smaller && dont_skip;
 ASSERT(r2, False, "ALL FLASH - Too many sprigs per partition for configured min-cluster-size.", "OPERATIONS", WARNING,
-    "Minimum space required for sprig overhead at min-cluster-size exceeds mounts-size-limit.
+    "Minimum space required for sprig overhead at min-cluster-size exceeds index-mount size.
 See: https://www.aerospike.com/docs/operations/configure/namespace/index/#flash-index and https://www.aerospike.com/docs/operations/plan/capacity/#aerospike-all-flash",
     "Check for too many sprigs for minimum cluster size.", e2);
@@ -2088,5 +2152,3 @@ ASSERT(m, False, "Outlier[s] detected by the server health check.", "OPERATIONS"
     "Server health check outlier detection.
Run command 'asinfo -v health-outliers' to see list of outliers"); SET CONSTRAINT VERSION ALL; - -""" diff --git a/lib/live_cluster/info_controller.py b/lib/live_cluster/info_controller.py index 03d7c31c..f700346e 100644 --- a/lib/live_cluster/info_controller.py +++ b/lib/live_cluster/info_controller.py @@ -238,6 +238,7 @@ class InfoNamespaceController(LiveClusterCommandController): def __init__(self, get_futures=False): self.modifiers = set(["with"]) self.get_futures = get_futures + self.stats_getter = GetStatisticsController(self.cluster) @CommandHelp( "Displays usage and objects information for each namespace", @@ -259,9 +260,16 @@ async def _do_default(self, line): modifiers=(with_modifier_help,), ) async def do_usage(self, line): - stats = await self.cluster.info_all_namespace_statistics(nodes=self.nodes) + service_stats, ns_stats = await asyncio.gather( + self.stats_getter.get_service(nodes=self.nodes), + self.stats_getter.get_namespace(nodes=self.nodes), + ) # Includes stats and configs return util.callable( - self.view.info_namespace_usage, stats, self.cluster, **self.mods + self.view.info_namespace_usage, + ns_stats, + service_stats, + self.cluster, + **self.mods, ) @CommandHelp( diff --git a/lib/live_cluster/show_controller.py b/lib/live_cluster/show_controller.py index c696cd75..c65f5742 100644 --- a/lib/live_cluster/show_controller.py +++ b/lib/live_cluster/show_controller.py @@ -902,7 +902,6 @@ def __init__(self): ) async def _do_default(self, line): return await asyncio.gather( - self.do_bins(line[:]), self.do_sets(line[:]), self.do_service(line[:]), self.do_namespace(line[:]), diff --git a/lib/utils/common.py b/lib/utils/common.py index ee456f53..95ad2131 100644 --- a/lib/utils/common.py +++ b/lib/utils/common.py @@ -31,11 +31,11 @@ Union, Callable, ) -from typing_extensions import NotRequired +from typing_extensions import NotRequired, Required import distro import socket import time -from urllib import request, error, parse +from urllib import request import aiohttp import zipfile from collections import OrderedDict @@ -63,7 +63,7 @@ "!=": operator.ne, } -CompareValue = str | int | bool +CompareValue = str | int | bool | float CompareCallable = Callable[[Any, Any], bool] CheckCallback = Callable[ [dict[str, Any], tuple[str, ...], CompareCallable, str | int | bool], bool @@ -74,7 +74,7 @@ def _check_value( data: dict[str, Any], keys: tuple[str], op: Callable[[Any, Any], bool], - value: int | str | bool, + value: int | str | bool | float, ): """ Function takes dictionary, and keys to compare. @@ -89,17 +89,17 @@ def _check_value( for key in keys: k = key - dv = 0 - type_check = int + default_value = None + type_check = float if isinstance(value, str): - dv = None + default_value = None type_check = str if isinstance(value, bool): - dv = False + default_value = False type_check = bool - fetched_value = util.get_value_from_dict(data, k, dv, type_check) + fetched_value = util.get_value_from_dict(data, k, default_value, type_check) if fetched_value is None: continue @@ -163,6 +163,7 @@ def __init__( compare_op (CompareCallable, optional): A function that tells how we should compare the field value to compare_val. Defaults to operator.gt. compare_val (CompareValue, optional): The value to use in the comparison to each field value. Defaults to 0. """ + self._source = source self._fields: tuple[str, ...] 
= fields self._check_func = check_func @@ -473,8 +474,13 @@ def __str__(self): ), ), FeatureCheck( - "Index-on-device", - NamespacesFieldCheck("namespaces", ("index_flash_used_bytes",)), + "Index-on-flash", + NamespacesFieldCheck( + "namespaces", + ("index-type",), + comp_ops["=="], + "flash", + ), ), FeatureCheck( "Index-on-pmem", @@ -485,6 +491,28 @@ def __str__(self): "pmem", ), ), + FeatureCheck( + "Index-on-shmem", + NamespacesFieldCheck( + "namespaces", + ("index-type",), + comp_ops["=="], + "shmem", + ), + ), + FeatureCheck( + "Compression", + NamespacesFieldCheck( + "namespaces", + ( + "data_compression_ratio", # Added in 7.0 + "pmem_compression_ratio", + "device_compression_ratio", + ), + comp_ops["<"], + 1.0, + ), + ), ) @@ -734,39 +762,23 @@ def _set_migration_status(namespace_stats, cluster_dict, ns_dict): cluster_dict["migrations_in_progress"] = True -class SummaryClusterLicenseAggOptionalDict(TypedDict, total=False): +class SummaryClusterLicenseAggDict(TypedDict, total=False): min: int max: int avg: int latest_time: datetime.datetime + latest: Required[int] -class SummaryClusterLicenseAggRequiredDict(TypedDict): - latest: int - - -class SummaryClusterLicenseAggDict( - SummaryClusterLicenseAggOptionalDict, SummaryClusterLicenseAggRequiredDict -): - pass - - -class SummaryStorageUsageDict(TypedDict): +class SummaryStorageUsageDict(TypedDict, total=False): total: int avail: int avail_pct: float - used: int + used: Required[int] used_pct: float -class SummaryClusterOptionalDict(TypedDict, total=False): - device: SummaryStorageUsageDict - pmem: SummaryStorageUsageDict - pmem_index: SummaryStorageUsageDict - flash_index: SummaryStorageUsageDict - - -class SummaryClusterRequiredDict(TypedDict): +class SummaryClusterDict(TypedDict): server_version: list[str] os_version: list[str] cluster_size: list[int] @@ -779,37 +791,36 @@ class SummaryClusterRequiredDict(TypedDict): active_ns: int ns_count: int license_data: SummaryClusterLicenseAggDict - memory: SummaryStorageUsageDict - - -class SummaryClusterDict(SummaryClusterOptionalDict, SummaryClusterRequiredDict): - pass - - -class SummaryNamespaceOptionalDict(TypedDict, total=False): - compression_ratio: float - cache_read_pct: int - device: SummaryStorageUsageDict - pmem: SummaryStorageUsageDict - pmem_index: SummaryStorageUsageDict - flash_index: SummaryStorageUsageDict - - -class SummaryNamespaceRequiredDict(TypedDict): + memory_data_and_indexes: NotRequired[SummaryStorageUsageDict] # pre 7.0 + memory: NotRequired[SummaryStorageUsageDict] # post 7.0 + device: NotRequired[SummaryStorageUsageDict] + pmem: NotRequired[SummaryStorageUsageDict] + data: NotRequired[SummaryStorageUsageDict] # added for compatibility with 7.0 + pmem_index: NotRequired[SummaryStorageUsageDict] + flash_index: NotRequired[SummaryStorageUsageDict] + shmem_index: NotRequired[SummaryStorageUsageDict] + index: NotRequired[SummaryStorageUsageDict] # added for compatibility with 7.0 + + +class SummaryNamespaceDict(TypedDict): devices_total: int devices_per_node: int device_count_same_across_nodes: bool repl_factor: list[int] master_objects: int migrations_in_progress: bool - index_type: str # TODO: should be Union[Literal["pmem"], Literal["flash"], Literal["shmem"]] - memory: SummaryStorageUsageDict rack_aware: bool license_data: SummaryClusterLicenseAggDict - - -class SummaryNamespaceDict(SummaryNamespaceOptionalDict, SummaryNamespaceRequiredDict): - pass + index_type: NotRequired[str] + compression_ratio: NotRequired[float] + cache_read_pct: NotRequired[int] + 
memory_data_and_indexes: NotRequired[SummaryStorageUsageDict] # pre 7.0 + memory: NotRequired[SummaryStorageUsageDict] # post 7.0 + device: NotRequired[SummaryStorageUsageDict] + pmem: NotRequired[SummaryStorageUsageDict] + pmem_index: NotRequired[SummaryStorageUsageDict] + flash_index: NotRequired[SummaryStorageUsageDict] + shmem_index: NotRequired[SummaryStorageUsageDict] SummaryNamespacesDict = NamespaceDict[SummaryNamespaceDict] @@ -837,13 +848,6 @@ def _initialize_summary_output(ns_list) -> SummaryDict: "device_count": 0, "device_count_per_node": 0, "device_count_same_across_nodes": True, - "memory": { - "total": 0, - "used": 0, - "used_pct": 0.0, - "avail": 0, - "avail_pct": 0.0, - }, "active_ns": 0, "ns_count": 0, "license_data": {"latest": 0}, @@ -859,15 +863,6 @@ def _initialize_summary_output(ns_list) -> SummaryDict: "repl_factor": [], "master_objects": 0, "migrations_in_progress": False, - # Memory is always used regardless of configuration - "memory": { - "total": 0, - "used": 0, - "used_pct": 0.0, - "avail": 0, - "avail_pct": 0.0, - }, - "index_type": "shmem", "rack_aware": False, "license_data": {"latest": 0}, } @@ -1002,8 +997,7 @@ def _manually_compute_license_data_size( for host_id, host_stats in ns_stats.items(): host_memory_bytes = 0.0 - host_device_bytes = 0.0 - host_pmem_bytes = 0.0 + host_data_bytes = 0.0 host_master_objects = 0 if not host_stats or isinstance(host_stats, Exception): @@ -1037,39 +1031,27 @@ def _manually_compute_license_data_size( return_type=int, ) - host_device_compression_ratio = util.get_value_from_dict( - host_stats, - "device_compression_ratio", - default_value=1.0, - return_type=float, - ) - - host_pmem_compression_ratio = util.get_value_from_dict( + host_data_compression_ratio = util.get_value_from_dict( host_stats, - "pmem_compression_ratio", + ( + "data_compression_ratio", + "pmem_compression_ratio", + "device_compression_ratio", + ), default_value=1.0, return_type=float, ) - host_device_bytes = util.get_value_from_dict( - host_stats, - "device_used_bytes", - default_value=0.0, - return_type=float, - ) - - host_device_bytes /= host_device_compression_ratio - - host_pmem_bytes = util.get_value_from_dict( + host_data_bytes = util.get_value_from_dict( host_stats, - "pmem_used_bytes", + ("data_used_bytes", "device_used_bytes", "pmem_used_bytes"), default_value=0.0, return_type=float, ) - host_pmem_bytes /= host_pmem_compression_ratio + host_data_bytes /= host_data_compression_ratio - if host_pmem_bytes == 0.0 and host_device_bytes == 0.0: + if host_data_bytes == 0.0: host_memory_bytes += util.get_value_from_dict( host_stats, "memory_used_index_bytes", @@ -1101,7 +1083,7 @@ def _manually_compute_license_data_size( ) <= version.LooseVersion(host_build_version): host_record_overhead = 39 - host_unique_data = host_memory_bytes + host_pmem_bytes + host_device_bytes + host_unique_data = host_memory_bytes + host_data_bytes ns_unique_data += host_unique_data ns_record_overhead += host_master_objects * host_record_overhead ns_master_objects += host_master_objects @@ -1154,7 +1136,7 @@ def create_summary( ns_configs={}, security_configs={}, license_data_usage: UDAResponsesDict | None = None, -): +) -> SummaryDict: """ Function takes four dictionaries service stats, namespace stats, set stats and metadata. Returns dictionary with summary information. 
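# A pattern worth noting in the hunks below: every metric renamed in server
# 7.0 is read by passing a tuple of candidate keys, newest name first, to
# util.get_value_from_dict / util.get_value_from_second_level_of_dict. A
# minimal sketch of that fallback idea, with a hypothetical helper and sample
# input (not asadm's actual implementation):

def first_present(stats: dict, keys: tuple, default=None, cast=float):
    """Return the first key of `keys` found in `stats`, cast via `cast`."""
    for key in keys:
        if key in stats:
            try:
                return cast(stats[key])
            except (TypeError, ValueError):
                continue
    return default

# A 6.x node reports device_compression_ratio; 7.0 renames it to
# data_compression_ratio, so the new name is probed first and the old
# per-storage names remain as fallbacks:
ratio = first_present(
    {"device_compression_ratio": "0.5"},
    ("data_compression_ratio", "pmem_compression_ratio", "device_compression_ratio"),
    default=1.0,
)  # -> 0.5 here; 1.0 when a node reports no compression ratio at all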
@@ -1176,22 +1158,34 @@ def create_summary( total_nodes = len(service_stats.keys()) - cl_memory_size_total = 0 - cl_memory_size_avail = 0 - cl_pmem_index_size_total = 0 - cl_pmem_index_size_avail = 0 - cl_flash_index_size_total = 0 - cl_flash_index_size_avail = 0 + # Pre 7.0 memory stats. Data + index + sindex + set index bytes + cluster_memory_data_and_indexes_total = 0 + cluster_memory_data_and_indexes_used = 0 + + cluster_shmem_index_used = 0 # index-type.shmem does not report total + + cluster_pmem_index_total = 0 + cluster_pmem_index_used = 0 - cl_nodewise_device_counts = {} + cluster_flash_index_total = 0 + cluster_flash_index_used = 0 - cl_nodewise_device_size = {} - cl_nodewise_device_used = {} - cl_nodewise_device_avail = {} + cl_nodewise_device_counts = ( + {} + ) # need nodewise to determine if device count is same across nodes - cl_nodewise_pmem_size = {} - cl_nodewise_pmem_used = {} - cl_nodewise_pmem_avail = {} + # Post 7.0 memory stats. Data only, no index or sindex bytes + cluster_memory_total = 0 + cluster_memory_used = 0 + cluster_memory_avail = 0 + + cluster_device_total = 0 + cluster_device_used = 0 + cluster_device_avail = 0 + + cluster_pmem_total = 0 + cluster_pmem_used = 0 + cluster_pmem_avail = 0 compute_license_data_size( namespace_stats, @@ -1265,7 +1259,7 @@ def create_summary( ns_total_devices = sum(device_counts.values()) ns_total_nodes = len(ns_stats.keys()) - if ns_total_devices: + if ns_total_devices and ns_total_nodes > 0: summary_dict["NAMESPACES"][ns]["devices_total"] = ns_total_devices summary_dict["NAMESPACES"][ns]["devices_per_node"] = round( ns_total_devices / ns_total_nodes @@ -1273,7 +1267,7 @@ def create_summary( if len(set(device_counts.values())) > 1: summary_dict["NAMESPACES"][ns]["device_count_same_across_nodes"] = False - # Memory + # Memory pre 7.0 mem_size: int = sum( util.get_value_from_second_level_of_dict( ns_stats, ("memory-size",), default_value=0, return_type=int @@ -1284,96 +1278,89 @@ def create_summary( ns_stats, ("memory_used_bytes",), default_value=0, return_type=int ).values() ) - mem_avail = mem_size - mem_used - mem_avail_pct = (mem_avail / mem_size) * 100.0 - mem_used_pct = 100.00 - mem_avail_pct - cl_memory_size_total += mem_size - cl_memory_size_avail += mem_avail - - ns_mem_usage: SummaryStorageUsageDict = { - "total": mem_size, - "used": mem_used, - "used_pct": mem_used_pct, - "avail": mem_avail, - "avail_pct": mem_avail_pct, - } - summary_dict["NAMESPACES"][ns]["memory"] = ns_mem_usage - index_type = summary_dict["NAMESPACES"][ns]["index_type"] = list( + if mem_size > 0: + mem_avail = mem_size - mem_used + mem_used_pct = (mem_used / mem_size) * 100.0 + mem_avail_pct = 100.00 - mem_used_pct + cluster_memory_data_and_indexes_total += mem_size + cluster_memory_data_and_indexes_used += mem_used + + ns_mem_usage: SummaryStorageUsageDict = { + "total": mem_size, + "used": mem_used, + "used_pct": mem_used_pct, + "avail": mem_avail, + "avail_pct": mem_avail_pct, + } + summary_dict["NAMESPACES"][ns]["memory_data_and_indexes"] = ns_mem_usage + + index_type = list( util.get_value_from_second_level_of_dict( - ns_stats, ("index-type",), default_value="shmem", return_type=str + ns_stats, ("index-type",), default_value="", return_type=str ).values() )[0] - # Pmem Index - if index_type == "pmem": - pmem_index_size = sum( - util.get_value_from_second_level_of_dict( - ns_configs[ns], - ("index-type.mounts-size-limit",), - default_value=0, - return_type=int, - ).values() - ) - pmem_index_used = sum( - 
util.get_value_from_second_level_of_dict( - ns_stats, - ("index_pmem_used_bytes",), - default_value=0, - return_type=int, - ).values() - ) + # Index + index_size = sum( + util.get_value_from_second_level_of_dict( + ns_configs[ns], + ( + "index-type.mounts-budget", + "index-type.mounts-size-limit", + ), + default_value=0, + return_type=int, + ).values() + ) + index_used = sum( + util.get_value_from_second_level_of_dict( + ns_stats, + ( + "index_used_bytes", + "index_pmem_used_bytes", + "index_flash_used_bytes", + ), + default_value=0, + return_type=int, + ).values() + ) - if pmem_index_size > 0: - pmem_index_avail = pmem_index_size - pmem_index_used - pmem_index_avail_pct = (pmem_index_avail / pmem_index_size) * 100.0 - pmem_index_used_pct = 100.00 - pmem_index_avail_pct - cl_pmem_index_size_total += pmem_index_size - cl_pmem_index_size_avail += pmem_index_avail - - ns_pmem_index_usage: SummaryStorageUsageDict = { - "total": pmem_index_size, - "used": pmem_index_used, - "used_pct": pmem_index_used_pct, - "avail": pmem_index_avail, - "avail_pct": pmem_index_avail_pct, + if index_size > 0 or index_used > 0: + ns_index_usage: SummaryStorageUsageDict | None = None + + if index_size > 0: + index_avail = index_size - index_used + index_avail_pct = (index_avail / index_size) * 100.0 + index_used_pct = 100.00 - index_avail_pct + ns_index_usage = { + "total": index_size, + "avail": index_avail, + "used": index_used, + "avail_pct": index_avail_pct, + "used_pct": index_used_pct, } - summary_dict["NAMESPACES"][ns]["pmem_index"] = ns_pmem_index_usage - - # Flash Index - elif index_type == "flash": - flash_index_size = sum( - util.get_value_from_second_level_of_dict( - ns_configs[ns], - ("index-type.mounts-size-limit",), - default_value=0, - return_type=int, - ).values() - ) - flash_index_used = sum( - util.get_value_from_second_level_of_dict( - ns_stats, - ("index_flash_used_bytes",), - default_value=0, - return_type=int, - ).values() - ) - - if flash_index_size > 0: - flash_index_avail = flash_index_size - flash_index_used - flash_index_avail_pct = (flash_index_avail / flash_index_size) * 100.0 - flash_index_used_pct = 100.00 - flash_index_avail_pct - cl_flash_index_size_total += flash_index_size - cl_flash_index_size_avail += flash_index_avail - - ns_flash_index_usage: SummaryStorageUsageDict = { - "total": flash_index_size, - "used": flash_index_used, - "used_pct": flash_index_used_pct, - "avail": flash_index_avail, - "avail_pct": flash_index_avail_pct, + else: + # shmem does not require you to configure mounts-budget + ns_index_usage = { + "used": index_used, } - summary_dict["NAMESPACES"][ns]["flash_index"] = ns_flash_index_usage + + if index_type == "pmem": + # TODO handle the cluster level aggregate + cluster_pmem_index_total += index_size + cluster_pmem_index_used += index_used + summary_dict["NAMESPACES"][ns]["pmem_index"] = ns_index_usage + summary_dict["NAMESPACES"][ns]["index_type"] = index_type + elif index_type == "flash": + cluster_flash_index_total += index_size + cluster_flash_index_used += index_used + summary_dict["NAMESPACES"][ns]["flash_index"] = ns_index_usage + summary_dict["NAMESPACES"][ns]["index_type"] = index_type + elif index_type == "shmem": + cluster_shmem_index_used += index_used + summary_dict["NAMESPACES"][ns]["shmem_index"] = ns_index_usage + summary_dict["NAMESPACES"][ns]["index_type"] = index_type storage_engine_type = list( util.get_value_from_second_level_of_dict( @@ -1381,96 +1368,80 @@ def create_summary( ).values() )[0] - if storage_engine_type == "device": - 
device_size = util.get_value_from_second_level_of_dict( - ns_stats, - ("device_total_bytes", "total-bytes-disk"), - default_value=0, - return_type=int, - ) - device_used = util.get_value_from_second_level_of_dict( - ns_stats, - ("device_used_bytes", "used-bytes-disk"), - default_value=0, - return_type=int, - ) - device_avail_pct = util.get_value_from_second_level_of_dict( - ns_stats, - ("device_available_pct", "available_pct"), - default_value=0, - return_type=int, - ) - device_avail = util.pct_to_value(device_size, device_avail_pct) - cl_nodewise_device_size = util.add_dicts( - cl_nodewise_device_size, device_size - ) - cl_nodewise_device_used = util.add_dicts( - cl_nodewise_device_used, device_used - ) - cl_nodewise_device_avail = util.add_dicts( - cl_nodewise_device_avail, device_avail - ) - device_size_total = sum(device_size.values()) - - if device_size_total > 0: - device_size_used = sum(device_used.values()) - device_size_avail = sum(device_avail.values()) - device_size_avail_pct = (device_size_avail / device_size_total) * 100.0 - device_size_used_pct = (device_size_used / device_size_total) * 100.0 - - ns_device_usage: SummaryStorageUsageDict = { - "total": device_size_total, - "used": device_size_used, - "used_pct": device_size_used_pct, - "avail": device_size_avail, - "avail_pct": device_size_avail_pct, - } - summary_dict["NAMESPACES"][ns]["device"] = ns_device_usage + data_size = util.get_value_from_second_level_of_dict( + ns_stats, + ( + "data_total_bytes", + "device_total_bytes", + "pmem_total_bytes", + "total-bytes-disk", + ), + default_value=0, + return_type=int, + ) + data_used = util.get_value_from_second_level_of_dict( + ns_stats, + ( + "data_used_bytes", + "device_used_bytes", + "pmem_used_bytes", + "used-bytes-disk", + ), + default_value=0, + return_type=int, + ) + data_avail_pct = util.get_value_from_second_level_of_dict( + ns_stats, + ( + "data_avail_pct", + "device_available_pct", + "pmem_available_pct", + "available_pct", + ), + default_value=0, + return_type=int, + ) + data_avail = util.pct_to_value(data_size, data_avail_pct) + data_size_total = sum(data_size.values()) + + if data_size_total > 0: + data_size_used = sum(data_used.values()) + data_size_avail = sum(data_avail.values()) + data_size_avail_pct = (data_size_avail / data_size_total) * 100.0 + data_size_used_pct = (data_size_used / data_size_total) * 100.0 + + ns_data_usage: SummaryStorageUsageDict = { + "total": data_size_total, + "used": data_size_used, + "used_pct": data_size_used_pct, + "avail": data_size_avail, + "avail_pct": data_size_avail_pct, + } - elif storage_engine_type == "pmem": - pmem_size = util.get_value_from_second_level_of_dict( - ns_stats, - ("pmem_total_bytes",), - default_value=0, - return_type=int, - ) - pmem_used = util.get_value_from_second_level_of_dict( - ns_stats, - ("pmem_used_bytes"), - default_value=0, - return_type=int, - ) - pmem_avail_pct = util.get_value_from_second_level_of_dict( - ns_stats, - ("pmem_available_pct"), - default_value=0, - return_type=int, - ) - pmem_avail = util.pct_to_value(pmem_size, pmem_avail_pct) - cl_nodewise_pmem_size = util.add_dicts(cl_nodewise_pmem_size, pmem_size) - cl_nodewise_pmem_used = util.add_dicts(cl_nodewise_pmem_used, pmem_used) - cl_nodewise_pmem_avail = util.add_dicts(cl_nodewise_pmem_avail, pmem_avail) - pmem_size_total = sum(pmem_size.values()) - - if pmem_size_total > 0: - pmem_size_used = sum(pmem_used.values()) - pmem_size_avail = sum(pmem_avail.values()) - pmem_size_avail_pct = (pmem_size_avail / pmem_size_total) * 100.0 - 
pmem_size_used_pct = (pmem_size_used / pmem_size_total) * 100.0 - - ns_pmem_usage: SummaryStorageUsageDict = { - "total": pmem_size_total, - "used": pmem_size_used, - "used_pct": pmem_size_used_pct, - "avail": pmem_size_avail, - "avail_pct": pmem_size_avail_pct, - } - summary_dict["NAMESPACES"][ns]["pmem"] = ns_pmem_usage + if storage_engine_type == "device": + cluster_device_total += data_size_total + cluster_device_used += data_size_used + cluster_device_avail += data_size_avail + summary_dict["NAMESPACES"][ns]["device"] = ns_data_usage + elif storage_engine_type == "pmem": + cluster_pmem_total += data_size_total + cluster_pmem_used += data_size_used + cluster_pmem_avail += data_size_avail + summary_dict["NAMESPACES"][ns]["pmem"] = ns_data_usage + elif storage_engine_type == "memory": + cluster_memory_total += data_size_total + cluster_memory_used += data_size_used + cluster_memory_avail += data_size_avail + summary_dict["NAMESPACES"][ns]["memory"] = ns_data_usage compression_ratio = max( util.get_value_from_second_level_of_dict( ns_stats, - ("device_compression_ratio", "pmem_compression_ratio"), + ( + "data_compression_ratio", + "device_compression_ratio", + "pmem_compression_ratio", + ), default_value=0.0, return_type=float, ).values() @@ -1490,31 +1461,22 @@ def create_summary( ) ) - data_in_memory = list( + cache_read_pcts = list( util.get_value_from_second_level_of_dict( ns_stats, - ("storage-engine.data-in-memory", "data-in-memory"), - default_value=False, - return_type=bool, + ("cache_read_pct", "cache-read-pct"), + default_value=None, + return_type=int, ).values() - )[0] + ) + if cache_read_pcts: + try: + summary_dict["NAMESPACES"][ns]["cache_read_pct"] = sum( + cache_read_pcts + ) // len(cache_read_pcts) + except Exception: + pass - if data_in_memory: - cache_read_pcts = list( - util.get_value_from_second_level_of_dict( - ns_stats, - ("cache_read_pct", "cache-read-pct"), - default_value=None, - return_type=int, - ).values() - ) - if cache_read_pcts: - try: - summary_dict["NAMESPACES"][ns]["cache_read_pct"] = sum( - cache_read_pcts - ) // len(cache_read_pcts) - except Exception: - pass master_objects = sum( util.get_value_from_second_level_of_dict( ns_stats, @@ -1548,66 +1510,81 @@ def create_summary( if len(set(cl_nodewise_device_counts.values())) > 1: summary_dict["CLUSTER"]["device_count_same_across_nodes"] = False - if cl_memory_size_total > 0: - memory_avail_pct = (cl_memory_size_avail / cl_memory_size_total) * 100.0 + # Pre 7.0 memory stats + if cluster_memory_data_and_indexes_total > 0: + cluster_memory_data_and_indexes_used_pct = ( + cluster_memory_data_and_indexes_used / cluster_memory_data_and_indexes_total + ) * 100.0 cluster_memory: SummaryStorageUsageDict = { - "total": cl_memory_size_total, - "avail": cl_memory_size_avail, - "avail_pct": memory_avail_pct, - "used": cl_memory_size_total - cl_memory_size_avail, - "used_pct": 100.0 - memory_avail_pct, + "total": cluster_memory_data_and_indexes_total, + "avail": cluster_memory_data_and_indexes_total + - cluster_memory_data_and_indexes_used, + "avail_pct": 100 - cluster_memory_data_and_indexes_used_pct, + "used": cluster_memory_data_and_indexes_used, + "used_pct": cluster_memory_data_and_indexes_used_pct, } - summary_dict["CLUSTER"]["memory"] = cluster_memory + summary_dict["CLUSTER"]["memory_data_and_indexes"] = cluster_memory - if cl_pmem_index_size_total > 0: - cl_pmem_index_size_avail_pct = ( - cl_pmem_index_size_avail / cl_pmem_index_size_total + if cluster_pmem_index_total > 0: + cluster_pmem_index_size_used_pct = 
( + cluster_pmem_index_used / cluster_pmem_index_total ) * 100.0 cluster_pmem_index: SummaryStorageUsageDict = { - "total": cl_pmem_index_size_total, - "avail": cl_pmem_index_size_avail, - "avail_pct": cl_pmem_index_size_avail_pct, - "used": cl_pmem_index_size_total - cl_pmem_index_size_avail, - "used_pct": 100.0 - cl_pmem_index_size_avail_pct, + "total": cluster_pmem_index_total, + "avail": cluster_pmem_index_total - cluster_pmem_index_used, + "avail_pct": 100 - cluster_pmem_index_size_used_pct, + "used": cluster_pmem_index_used, + "used_pct": cluster_pmem_index_size_used_pct, } summary_dict["CLUSTER"]["pmem_index"] = cluster_pmem_index - if cl_flash_index_size_total > 0: - cl_flash_index_size_avail_pct = ( - cl_flash_index_size_avail / cl_flash_index_size_total + if cluster_flash_index_total > 0: + cluster_flash_index_used_pct = ( + cluster_flash_index_used / cluster_flash_index_total ) * 100.0 cluster_flash_index: SummaryStorageUsageDict = { - "total": cl_flash_index_size_total, - "avail": cl_flash_index_size_avail, - "avail_pct": cl_flash_index_size_avail_pct, - "used": cl_flash_index_size_total - cl_flash_index_size_avail, - "used_pct": 100.0 - cl_flash_index_size_avail_pct, + "total": cluster_flash_index_total, + "avail": cluster_flash_index_total - cluster_flash_index_used, + "avail_pct": 100 - cluster_flash_index_used_pct, + "used": cluster_flash_index_used, + "used_pct": cluster_flash_index_used_pct, } summary_dict["CLUSTER"]["flash_index"] = cluster_flash_index - cl_device_size_total = sum(cl_nodewise_device_size.values()) - if cl_device_size_total > 0: - cluster_device_used = sum(cl_nodewise_device_used.values()) - cluster_device_avail = sum(cl_nodewise_device_avail.values()) + if cluster_shmem_index_used > 0: + cluster_shmem_index: SummaryStorageUsageDict = { + "used": cluster_shmem_index_used, + } + summary_dict["CLUSTER"]["shmem_index"] = cluster_shmem_index + + # Post 7.0 memory stats that only include data not sindex or index bytes + if cluster_memory_total > 0: + cluster_memory_index: SummaryStorageUsageDict = { + "total": cluster_memory_total, + "avail": cluster_memory_avail, + "avail_pct": (cluster_memory_avail / cluster_memory_total) * 100.0, + "used": cluster_memory_used, + "used_pct": (cluster_memory_used / cluster_memory_total) * 100.0, + } + summary_dict["CLUSTER"]["memory"] = cluster_memory_index + + if cluster_device_total > 0: cluster_device_index: SummaryStorageUsageDict = { - "total": cl_device_size_total, + "total": cluster_device_total, "avail": cluster_device_avail, - "avail_pct": (cluster_device_avail / cl_device_size_total) * 100.0, + "avail_pct": (cluster_device_avail / cluster_device_total) * 100.0, "used": cluster_device_used, - "used_pct": (cluster_device_used / cl_device_size_total) * 100.0, + "used_pct": (cluster_device_used / cluster_device_total) * 100.0, } summary_dict["CLUSTER"]["device"] = cluster_device_index - cl_pmem_size_total = sum(cl_nodewise_pmem_size.values()) - if cl_pmem_size_total > 0: - cluster_pmem_used = sum(cl_nodewise_pmem_used.values()) - cluster_pmem_avail = sum(cl_nodewise_pmem_avail.values()) + if cluster_pmem_total > 0: cluster_pmem_index: SummaryStorageUsageDict = { - "total": cl_pmem_size_total, + "total": cluster_pmem_total, "avail": cluster_pmem_avail, - "avail_pct": (cluster_pmem_avail / cl_pmem_size_total) * 100.0, + "avail_pct": (cluster_pmem_avail / cluster_pmem_total) * 100.0, "used": cluster_pmem_used, - "used_pct": (cluster_pmem_used / cl_pmem_size_total) * 100.0, + "used_pct": (cluster_pmem_used / 
cluster_pmem_total) * 100.0, } summary_dict["CLUSTER"]["pmem"] = cluster_pmem_index @@ -1869,56 +1846,52 @@ def _format_set_stop_writes_metrics( ): for node in set_stats: for (ns, set_), stats in set_stats[node].items(): - metric1 = "memory_data_bytes" - metric2 = "device_data_bytes" config = "stop-writes-size" - usage = None metric = None - usage1: str | None = stats.get(metric1, None) - usage2: str | None = stats.get(metric2, None) - threshold: str | None = stats.get(config, None) + threshold: int | None = util.get_value_from_dict(stats, config, None, int) - if usage1 is None or usage1 == "0": - metric = metric2 - usage = usage2 - else: - metric = metric1 - usage = usage1 + metric = "data_used_bytes" # Added in 7.0 + usage = util.get_value_from_dict(stats, metric, None, int) + + if usage is None: + """ + Memory has to be checked before device_data_bytes per the docs. + The limit is checked against one or the other not both. + """ + metric = "memory_data_bytes" + usage = util.get_value_from_dict(stats, metric, None, int) + + if usage is None or usage == 0: + metric = "device_data_bytes" + usage = util.get_value_from_dict(stats, metric, None, int) if usage is not None and threshold is not None: - use = int(usage) - thresh = int(threshold) - sw = _is_stop_writes_cause(use, thresh) + sw = _is_stop_writes_cause(usage, threshold) _create_stop_writes_entry( stop_writes_metrics[node], metric, - use, + usage, sw, - thresh, + threshold, config=config, namespace=ns, set_=set_, ) metric = "objects" - config = "stop-writes-count" - usage: str | None = stats.get(metric, None) - threshold: str | None = stats.get(config, None) - - if threshold is None: - config = "set-stop-writes-count" - threshold = stats.get(config, None) + usage = util.get_value_from_dict(stats, metric, None, int) + config, threshold = _get_first_value_from_dict_with_key( + stats, ("stop-writes-count", "set-stop-writes-count"), None, int + ) if usage is not None and threshold is not None: - use = int(usage) - thresh = int(threshold) - sw = _is_stop_writes_cause(use, thresh) + sw = _is_stop_writes_cause(usage, threshold) _create_stop_writes_entry( stop_writes_metrics[node], metric, - use, + usage, sw, - thresh, + threshold, config=config, namespace=ns, set_=set_, diff --git a/lib/view/templates.py b/lib/view/templates.py index 9f20e3ce..a3dfd279 100644 --- a/lib/view/templates.py +++ b/lib/view/templates.py @@ -258,19 +258,38 @@ def usage_weighted_avg(edatas: list[EntryData]): formatters=(Formatters.red_alert(lambda edata: edata.value),), ), Subgroup( - "Device", + "System Memory", + ( + Field( + "Avail%", Projectors.Percent("service_stats", "system_free_mem_pct") + ), + Field( + "Evict%", + Projectors.Percent("ns_stats", "evict-sys-memory-pct"), + ), + ), + ), + Subgroup( + "Primary Index", ( + Field("Type", Projectors.String("ns_stats", "index-type")), Field( "Total", Projectors.Number( - "ns_stats", "device_total_bytes", "total-bytes-disk" + "ns_stats", + "index-type.mounts-budget", # Added in 7.0 + "index-type.mounts-size-limit", ), hidden=True, ), Field( "Used", Projectors.Number( - "ns_stats", "device_used_bytes", "used-bytes-disk" + "ns_stats", + "index_used_bytes", # flash, pmem, and memory metrics were consolidated in 7.0 + "index_flash_used_bytes", + "index_pmem_used_bytes", + "memory_used_index_bytes", ), converter=Converters.byte, aggregator=Aggregators.sum(), @@ -279,52 +298,63 @@ def usage_weighted_avg(edatas: list[EntryData]): "Used%", Projectors.Div( Projectors.Number( - "ns_stats", "device_used_bytes", 
"used-bytes-disk" + "ns_stats", + "index_used_bytes", # flash, pmem, and memory metrics were consolidated in 7.0 + "index_flash_used_bytes", + "index_pmem_used_bytes", + "memory_used_index_bytes", ), Projectors.Number( - "ns_stats", "device_total_bytes", "total-bytes-disk" + "ns_stats", + "index-type.mounts-budget", # Meant to be used with index_used_bytes. Both added in 7.0 + "index-type.mounts-size-limit", ), ), converter=Converters.ratio_to_pct, aggregator=ComplexAggregator( - create_usage_weighted_avg("Device"), + create_usage_weighted_avg("Primary Index"), converter=Converters.ratio_to_pct, ), formatters=( Formatters.yellow_alert( lambda edata: edata.value * 100 - >= edata.record["Device"]["HWM%"] - and edata.record["Device"]["HWM%"] != 0 + >= edata.record["Primary Index"]["Evict%"] + and edata.record["Primary Index"]["Evict%"] != 0 ), ), ), Field( - "HWM%", - Projectors.Number("ns_stats", "high-water-disk-pct"), - converter=Converters.pct, - ), - Field( - "Avail%", + "Evict%", Projectors.Number( - "ns_stats", "device_available_pct", "available_pct" + "ns_stats", + "index-type.evict-mounts-pct", # Added in 7.0 + "index-type.mounts-high-water-pct", ), converter=Converters.pct, - formatters=(Formatters.red_alert(lambda edata: edata.value < 10),), ), ), ), Subgroup( - "Memory", + "Secondary Index", ( + Field("Type", Projectors.String("ns_stats", "sindex-type")), Field( "Total", - Projectors.Number("ns_stats", "memory-size", "total-bytes-memory"), + Projectors.Number( + "ns_stats", + "sindex-type.mounts-budget", # Added in 7.0 + "sindex-type.mounts-size-limit", + ), hidden=True, ), Field( "Used", Projectors.Number( - "ns_stats", "memory_used_bytes", "used-bytes-memory" + "ns_stats", + "sindex_used_bytes", # flash, pmem, and memory metrics were consolidated in 7.0 + "sindex_flash_used_bytes", + "sindex_pmem_used_bytes", + "memory_used_sindex_bytes", ), converter=Converters.byte, aggregator=Aggregators.sum(), @@ -333,46 +363,50 @@ def usage_weighted_avg(edatas: list[EntryData]): "Used%", Projectors.Div( Projectors.Number( - "ns_stats", "memory_used_bytes", "used-bytes-memory" + "ns_stats", + "sindex_used_bytes", # flash, pmem, and memory metrics were consolidated in 7.0 + "sindex_flash_used_bytes", + "sindex_pmem_used_bytes", + "memory_used_sindex_bytes", ), Projectors.Number( - "ns_stats", "memory-size", "total-bytes-memory" + "ns_stats", + "sindex-type.mounts-budget", # Meant to be used with index_used_bytes. 
Both added in 7.0 + "sindex-type.mounts-size-limit", ), ), converter=Converters.ratio_to_pct, aggregator=ComplexAggregator( - create_usage_weighted_avg("Memory"), + create_usage_weighted_avg("Secondary Index"), converter=Converters.ratio_to_pct, ), formatters=( Formatters.yellow_alert( lambda edata: edata.value * 100 - > edata.record["Memory"]["HWM%"] - and edata.record["Memory"]["HWM%"] != 0 + >= edata.record["Secondary Index"]["Evict%"] + and edata.record["Secondary Index"]["Evict%"] != 0 ), ), ), Field( - "HWM%", - Projectors.Number("ns_stats", "high-water-memory-pct"), - converter=Converters.pct, - ), - Field( - "Stop%", - Projectors.Number("ns_stats", "stop-writes-pct"), - converter=Converters.pct, + "Evict%", + Projectors.Number( + "ns_stats", + "sindex-type.evict-mounts-pct", # Added in 7.0 + "sindex-type.mounts-high-water-pct", + ), ), ), ), Subgroup( - "Primary Index", + "Storage Engine", ( - Field("Type", Projectors.String("ns_stats", "index-type")), + Field("Type", Projectors.String("ns_stats", "storage-engine")), Field( "Total", Projectors.Number( "ns_stats", - "index-type.mounts-size-limit", + "data_total_bytes", ), hidden=True, ), @@ -380,9 +414,7 @@ def usage_weighted_avg(edatas: list[EntryData]): "Used", Projectors.Number( "ns_stats", - "index_flash_used_bytes", - "index_pmem_used_bytes", - "memory_used_index_bytes", + "data_used_bytes", ), converter=Converters.byte, aggregator=Aggregators.sum(), @@ -392,51 +424,132 @@ def usage_weighted_avg(edatas: list[EntryData]): Projectors.Div( Projectors.Number( "ns_stats", - "index_flash_used_bytes", - "index_pmem_used_bytes", - "memory_used_index_bytes", + "data_used_bytes", + ), + Projectors.Number( + "ns_stats", + "data_total_bytes", ), - Projectors.Number("ns_stats", "index-type.mounts-size-limit"), ), converter=Converters.ratio_to_pct, aggregator=ComplexAggregator( - create_usage_weighted_avg("Primary Index"), + create_usage_weighted_avg("Storage Engine"), + converter=Converters.ratio_to_pct, + ), + formatters=( + Formatters.red_alert( + lambda edata: edata.value * 100 + >= edata.record["Storage Engine"]["Evict%"] + and edata.record["Storage Engine"]["Evict%"] != 0 + or edata.value * 100 + >= edata.record["Storage Engine"]["Used Stop%"] + ), + ), + ), + Field( + "Evict%", + Projectors.Number("ns_stats", "storage-engine.evict-used-pct"), + converter=Converters.pct, + ), + Field( + "Used Stop%", + Projectors.Number( + "ns_stats", "storage-engine.stop-writes-used-pct" + ), + converter=Converters.pct, + ), + Field( + "Avail%", + Projectors.Number( + "ns_stats", + "data_avail_pct", + ), + converter=Converters.pct, + formatters=( + Formatters.red_alert( + lambda edata: edata.value + <= edata.record["Storage Engine"]["Avail Stop%"] + ), + ), + ), + Field( + "Avail Stop%", + Projectors.Number( + "ns_stats", "storage-engine.stop-writes-avail-pct" + ), + converter=Converters.pct, + ), + ), + ), + # Memory was unified in pindex, sindex, and storage-engine in 7.0. This will + # only be displayed if the cluster is running 6.4 or earlier. 
+ Subgroup( + "Memory", + ( + Field( + "Total", + Projectors.Number("ns_stats", "memory-size", "total-bytes-memory"), + hidden=True, + ), + Field( + "Used", + Projectors.Number( + "ns_stats", "memory_used_bytes", "used-bytes-memory" + ), + converter=Converters.byte, + aggregator=Aggregators.sum(), + ), + Field( + "Used%", + Projectors.Div( + Projectors.Number( + "ns_stats", "memory_used_bytes", "used-bytes-memory" + ), + Projectors.Number( + "ns_stats", "memory-size", "total-bytes-memory" + ), + ), + converter=Converters.ratio_to_pct, + aggregator=ComplexAggregator( + create_usage_weighted_avg("Memory"), converter=Converters.ratio_to_pct, ), formatters=( Formatters.yellow_alert( lambda edata: edata.value * 100 - >= edata.record["Primary Index"]["HWM%"] - and edata.record["Primary Index"]["HWM%"] != 0 + > edata.record["Memory"]["HWM%"] + and edata.record["Memory"]["HWM%"] != 0 ), ), ), Field( "HWM%", - Projectors.Number("ns_stats", "index-type.mounts-high-water-pct"), + Projectors.Number("ns_stats", "high-water-memory-pct"), + converter=Converters.pct, + ), + Field( + "Stop%", + Projectors.Number("ns_stats", "stop-writes-pct"), converter=Converters.pct, ), ), ), + # Replaced by "Storage Engine" in 7.0. This will + # only be displayed if the cluster is running 6.4 or earlier. Subgroup( - "Secondary Index", + "Device", ( - Field("Type", Projectors.String("ns_stats", "sindex-type")), Field( "Total", Projectors.Number( - "ns_stats", - "sindex-type.mounts-size-limit", + "ns_stats", "device_total_bytes", "total-bytes-disk" ), hidden=True, ), Field( "Used", Projectors.Number( - "ns_stats", - "sindex_flash_used_bytes", - "sindex_pmem_used_bytes", - "memory_used_sindex_bytes", + "ns_stats", "device_used_bytes", "used-bytes-disk" ), converter=Converters.byte, aggregator=Aggregators.sum(), @@ -445,34 +558,42 @@ def usage_weighted_avg(edatas: list[EntryData]): "Used%", Projectors.Div( Projectors.Number( - "ns_stats", - "sindex_flash_used_bytes", - "sindex_pmem_used_bytes", - "memory_used_sindex_bytes", + "ns_stats", "device_used_bytes", "used-bytes-disk" + ), + Projectors.Number( + "ns_stats", "device_total_bytes", "total-bytes-disk" ), - Projectors.Number("ns_stats", "sindex-type.mounts-size-limit"), ), converter=Converters.ratio_to_pct, aggregator=ComplexAggregator( - create_usage_weighted_avg("Secondary Index"), + create_usage_weighted_avg("Device"), converter=Converters.ratio_to_pct, ), formatters=( Formatters.yellow_alert( - lambda edata: edata.value - >= edata.record["Secondary Index"]["HWM%"] - and edata.record["Secondary Index"]["HWM%"] != 0 + lambda edata: edata.value * 100 + >= edata.record["Device"]["HWM%"] + and edata.record["Device"]["HWM%"] != 0 ), ), ), Field( "HWM%", - Projectors.Number("ns_stats", "sindex-type.mounts-high-water-pct"), + Projectors.Number("ns_stats", "high-water-disk-pct"), + converter=Converters.pct, + ), + Field( + "Avail%", + Projectors.Number( + "ns_stats", "device_available_pct", "available_pct" + ), + converter=Converters.pct, + formatters=(Formatters.red_alert(lambda edata: edata.value < 10),), ), ), ), ), - from_source=("node_ids", "node_names", "ns_stats"), + from_source=("node_ids", "node_names", "ns_stats", "service_stats"), for_each="ns_stats", group_by=("Namespace"), order_by=FieldSorter("Node"), @@ -609,20 +730,30 @@ def set_index_projector(enable_index, index_populating): node_field, hidden_node_id_field, Field("Set Delete", Projectors.Boolean("set_stats", "deleting", "set-delete")), + Field( + "Storage Engine Used", + Projectors.Number("set_stats", 
"data_used_bytes"), # New in server 7.0 + converter=Converters.byte, + aggregator=Aggregators.sum(), + ), Field( "Memory Used", - Projectors.Number("set_stats", "memory_data_bytes", "n-bytes-memory"), + Projectors.Number( + "set_stats", "memory_data_bytes", "n-bytes-memory" + ), # Unified into data_used_bytes in 7.0 converter=Converters.byte, aggregator=Aggregators.sum(), ), Field( "Disk Used", - Projectors.Number("set_stats", "device_data_bytes", "n-bytes-device"), + Projectors.Number( + "set_stats", "device_data_bytes", "n-bytes-device" + ), # Unified into data_used_bytes in 7.0 converter=Converters.byte, aggregator=Aggregators.sum(), ), Subgroup( - "Quota", + "Size Quota", ( Field( "Total", @@ -636,19 +767,31 @@ def set_index_projector(enable_index, index_populating): Field( "Used%", Projectors.Div( - Projectors.Sum( - Projectors.Number( - "set_stats", "memory_data_bytes", "n-bytes-memory" - ), - Projectors.Number( - "set_stats", "device_data_bytes", "n-bytes-device" + Projectors.Any( + FieldType.number, + Projectors.Number("set_stats", "data_used_bytes"), + Projectors.Func( + FieldType.number, + lambda m_data, d_data: d_data + if m_data == 0 + else m_data, + Projectors.Number( + "set_stats", + "memory_data_bytes", + "n-bytes-memory", # Unified into data_used_bytes in 7.0 + ), + Projectors.Number( + "set_stats", + "device_data_bytes", + "n-bytes-device", # Unified into data_used_bytes in 7.0 + ), ), ), Projectors.Number("set_stats", "stop-writes-size"), ), converter=Converters.ratio_to_pct, aggregator=ComplexAggregator( - create_usage_weighted_avg("Quota"), + create_usage_weighted_avg("Size Quota"), converter=Converters.ratio_to_pct, ), formatters=( @@ -661,12 +804,41 @@ def set_index_projector(enable_index, index_populating): ), ), Field( - "Objects", + "Total Records", Projectors.Number("set_stats", "objects", "n_objects"), converter=Converters.scientific_units, aggregator=Aggregators.sum(), ), - Field("Stop Writes Count", Projectors.Number("set_stats", "stop-writes-count")), + Subgroup( + "Records Quota", + ( + Field( + "Total", + Projectors.Number("set_stats", "stop-writes-count"), + ), + Field( + "Used%", + Projectors.Div( + Projectors.Number( + "set_stats", + "objects", + ), + Projectors.Number("set_stats", "stop-writes-count"), + ), + converter=Converters.ratio_to_pct, + aggregator=ComplexAggregator( + create_usage_weighted_avg("Records Quota"), + converter=Converters.ratio_to_pct, + ), + formatters=( + Formatters.red_alert(lambda edata: edata.value * 100 >= 90.0), + Formatters.yellow_alert( + lambda edata: edata.value * 100 >= 75.0 + ), + ), + ), + ), + ), Field("Disable Eviction", Projectors.Boolean("set_stats", "disable-eviction")), Field("Set Enable XDR", Projectors.String("set_stats", "set-enable-xdr")), Field( @@ -1079,7 +1251,7 @@ def _storage_type_display_name(storage_type: str, field_title: str, subgroup: bo if not subgroup: title = ( - " ".join(val[0].upper() + val[1:] for val in storage_type.split(" ")) + " ".join(val[0].upper() + val[1:] for val in storage_type.split("_")) + " " + field_title ) @@ -1089,8 +1261,13 @@ def _storage_type_display_name(storage_type: str, field_title: str, subgroup: bo return title -def create_summary_total(source: str, storage_type: str, subgroup=False): - title = _storage_type_display_name(storage_type, "Total", subgroup) +def create_summary_total( + source: str, storage_type: str, subgroup=False, display_name: str | None = None +): + if display_name is None: + title = _storage_type_display_name(storage_type, "Total", subgroup) + else: + 
title = _storage_type_display_name(display_name, "Total", subgroup) return Field( title, @@ -1103,8 +1280,13 @@ def create_summary_total(source: str, storage_type: str, subgroup=False): ) -def create_summary_used(source: str, storage_type: str, subgroup=False): - title = _storage_type_display_name(storage_type, "Used", subgroup) +def create_summary_used( + source: str, storage_type: str, subgroup=False, display_name: str | None = None +): + if display_name is None: + title = _storage_type_display_name(storage_type, "Used", subgroup) + else: + title = _storage_type_display_name(display_name, "Used", subgroup) return Field( title, @@ -1117,8 +1299,13 @@ def create_summary_used(source: str, storage_type: str, subgroup=False): ) -def create_summary_used_pct(source: str, storage_type: str, subgroup=False): - title = _storage_type_display_name(storage_type, "Used%", subgroup) +def create_summary_used_pct( + source: str, storage_type: str, subgroup=False, display_name: str | None = None +): + if display_name is None: + title = _storage_type_display_name(storage_type, "Used%", subgroup) + else: + title = _storage_type_display_name(display_name, "Used%", subgroup) return Field( title, @@ -1131,8 +1318,13 @@ def create_summary_used_pct(source: str, storage_type: str, subgroup=False): ) -def create_summary_avail(source: str, storage_type: str, subgroup=False): - title = _storage_type_display_name(storage_type, "Avail", subgroup) +def create_summary_avail( + source: str, storage_type: str, subgroup=False, display_name: str | None = None +): + if display_name is None: + title = _storage_type_display_name(storage_type, "Avail", subgroup) + else: + title = _storage_type_display_name(display_name, "Avail", subgroup) return Field( title, @@ -1145,8 +1337,13 @@ def create_summary_avail(source: str, storage_type: str, subgroup=False): ) -def create_summary_avail_pct(source: str, storage_type: str, subgroup=False): - title = _storage_type_display_name(storage_type, "Avail%", subgroup) +def create_summary_avail_pct( + source: str, storage_type: str, subgroup=False, display_name: str | None = None +): + if display_name is None: + title = _storage_type_display_name(storage_type, "Avail%", subgroup) + else: + title = _storage_type_display_name(display_name, "Avail%", subgroup) return Field( title, @@ -1179,8 +1376,8 @@ def extract_and_convert_value(d: dict): "Migrations", Projectors.String("cluster_dict", "migrations_in_progress"), formatters=( - Formatters.green_alert(lambda edata: edata.value), - Formatters.red_alert(lambda edata: not edata.value), + Formatters.red_alert(lambda edata: edata.value), + Formatters.green_alert(lambda edata: not edata.value), ), ), Field( @@ -1221,12 +1418,40 @@ def extract_and_convert_value(d: dict): # Subgroup( # "Memory", # ( - create_summary_total("cluster_dict", "memory"), - create_summary_used("cluster_dict", "memory"), - create_summary_used_pct("cluster_dict", "memory"), - create_summary_avail("cluster_dict", "memory"), - create_summary_avail_pct("cluster_dict", "memory"), + create_summary_total( + "cluster_dict", + "memory_data_and_indexes", + display_name="Memory (Data + Indexes)", + ), + create_summary_used( + "cluster_dict", + "memory_data_and_indexes", + display_name="Memory (Data + Indexes)", + ), + create_summary_used_pct( + "cluster_dict", + "memory_data_and_indexes", + display_name="Memory (Data + Indexes)", + ), + create_summary_avail( + "cluster_dict", + "memory_data_and_indexes", + display_name="Memory (Data + Indexes)", + ), + create_summary_avail_pct( + 
"cluster_dict", + "memory_data_and_indexes", + display_name="Memory (Data + Indexes)", + ), + # ), # ), + # Subgroup( + # "Shmem Index", # Sindex added to shmem in EE by default in 6.1. However, + # this will only be displayed in 7.0. Pre 7.0 includes shmem index metrics + # as apart of memory metrics. + # ( + create_summary_used("cluster_dict", "shmem_index"), + # ), # ), # Subgroup( # "Pmem Index", @@ -1249,6 +1474,16 @@ def extract_and_convert_value(d: dict): # ), # ), # Subgroup( + # "Memory", + # ( + create_summary_total("cluster_dict", "memory"), + create_summary_used("cluster_dict", "memory"), + create_summary_used_pct("cluster_dict", "memory"), + create_summary_avail("cluster_dict", "memory"), + create_summary_avail_pct("cluster_dict", "memory"), + # ), + # ), + # Subgroup( # "Device", # ( create_summary_total("cluster_dict", "device"), @@ -1374,11 +1609,23 @@ def extract_and_convert_value(d: dict): ), ), Subgroup( - "Memory", + "Memory (Data + Indexes)", ( - create_summary_total("ns_stats", "memory", subgroup=True), - create_summary_used_pct("ns_stats", "memory", subgroup=True), - create_summary_avail_pct("ns_stats", "memory", subgroup=True), + create_summary_total( + "ns_stats", "memory_data_and_indexes", subgroup=True + ), + create_summary_used( + "ns_stats", "memory_data_and_indexes", subgroup=True + ), + create_summary_used_pct( + "ns_stats", "memory_data_and_indexes", subgroup=True + ), + create_summary_avail( + "ns_stats", "memory_data_and_indexes", subgroup=True + ), + create_summary_avail_pct( + "ns_stats", "memory_data_and_indexes", subgroup=True + ), ), ), Subgroup( @@ -1397,6 +1644,14 @@ def extract_and_convert_value(d: dict): create_summary_avail_pct("ns_stats", "flash_index", subgroup=True), ), ), + Subgroup( + "Memory", + ( + create_summary_total("ns_stats", "memory", subgroup=True), + create_summary_used_pct("ns_stats", "memory", subgroup=True), + create_summary_avail_pct("ns_stats", "memory", subgroup=True), + ), + ), Subgroup( "Device", ( diff --git a/lib/view/view.py b/lib/view/view.py index 6a2c3d54..a16fdb5b 100644 --- a/lib/view/view.py +++ b/lib/view/view.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import asyncio +from collections.abc import Iterator import datetime import locale import logging @@ -29,8 +29,10 @@ from lib.utils.common import ( StopWritesDict, SummaryClusterDict, + SummaryClusterLicenseAggDict, SummaryDict, SummaryNamespacesDict, + SummaryStorageUsageDict, ) from lib.view import sheet, terminal, templates from lib.view.sheet import SheetStyle @@ -111,7 +113,7 @@ def info_network( cluster: Cluster, timestamp="", with_=None, - **ignore + **ignore, ): node_names = cluster.get_node_names(with_) node_ids = cluster.get_node_ids(with_) @@ -159,7 +161,9 @@ def info_network( @staticmethod @reserved_modifiers - def info_namespace_usage(stats, cluster, timestamp="", with_=None, **ignore): + def info_namespace_usage( + ns_stats, service_stats, cluster, timestamp="", with_=None, **ignore + ): node_names = cluster.get_node_names(with_) node_ids = cluster.get_node_ids(with_) title_suffix = CliView._get_timestamp_suffix(timestamp) @@ -167,7 +171,8 @@ def info_namespace_usage(stats, cluster, timestamp="", with_=None, **ignore): sources = dict( node_ids=node_ids, node_names=node_names, - ns_stats=stats, + ns_stats=ns_stats, + service_stats=service_stats, ) common = dict(principal=cluster.get_expected_principal()) @@ -344,7 +349,7 @@ def show_distribution( like=None, with_=None, timestamp="", - **ignore + **ignore, ): likes = util.compile_likes(like) title_suffix = CliView._get_timestamp_suffix(timestamp) @@ -390,7 +395,7 @@ def show_object_distribution( with_=None, timestamp="", loganalyser_mode=False, - **ignore + **ignore, ): node_names = cluster.get_node_names(with_) likes = util.compile_likes(like) @@ -510,7 +515,7 @@ def show_config( title_every_nth=0, flip_output=False, timestamp="", - **ignore + **ignore, ): title_suffix = CliView._get_timestamp_suffix(timestamp) title = title + title_suffix @@ -554,7 +559,7 @@ def show_xdr_ns_config( title_every_nth=0, flip_output=False, timestamp="", - **ignore + **ignore, ): title_suffix = CliView._get_timestamp_suffix(timestamp) node_names = cluster.get_node_names(with_) @@ -606,7 +611,7 @@ def show_xdr_ns_stats( show_total=False, by_dc=False, timestamp="", - **ignore + **ignore, ): title_suffix = CliView._get_timestamp_suffix(timestamp) node_names = cluster.get_node_names(with_) @@ -672,7 +677,7 @@ def show_xdr_dc_config( title_every_nth=0, flip_output=False, timestamp="", - **ignore + **ignore, ): dc_configs = util.flip_keys(dc_configs) sorted_keys = list(dc_configs.keys()) @@ -705,7 +710,7 @@ def show_xdr_dc_stats( flip_output=False, show_total=False, timestamp="", - **ignore + **ignore, ): dc_configs = util.flip_keys(dc_configs) sorted_keys = list(dc_configs.keys()) @@ -735,7 +740,7 @@ def show_xdr_filters( title_every_nth=0, flip_output=False, timestamp="", - **ignore + **ignore, ): title_suffix = CliView._get_timestamp_suffix(timestamp) style = SheetStyle.rows if flip_output else None @@ -1169,7 +1174,7 @@ def show_roster( with_=None, flip=False, timestamp="", - **ignore + **ignore, ): if not roster_data: return @@ -1243,7 +1248,7 @@ def show_jobs( trid=None, like=None, with_=None, - **ignore + **ignore, ): if jobs_data is None: return @@ -2079,10 +2084,104 @@ def _summary_namespace_table_view(stats: SummaryNamespacesDict, **ignore): sheet.render(templates.summary_namespace_sheet, title, sources) ) + class SummaryLine: + def __init__(self, name: str, info: str): + self.name = name + self.info = info + + def __str__(self) -> str: + s = self.name.ljust(25) + s += ":" + (" " * 2) + return s + self.info + + class 
SummaryUsageLine(SummaryLine): + DEFAULT_FORMAT = "Total {total}, {used_pct:.2f}% used ({used}), {avail_pct:.2f}% available {con_str}({avail})" + + def __init__( + self, + name: str, + usage_dict: SummaryStorageUsageDict, + contiguous=False, + format_str=None, + ): + if contiguous and format_str: + raise ValueError( + "contiguous and format_str cannot both be specified for SummaryUsageLine" + ) + + s = "" + con_str = "contiguous space " if contiguous else "" + converted_dict = { + "used": file_size.size(usage_dict["used"]).strip(), + } + + if "total" in usage_dict: + converted_dict["total"] = file_size.size(usage_dict["total"]).strip() + if "used_pct" in usage_dict: + converted_dict["used_pct"] = usage_dict["used_pct"] + if "avail" in usage_dict: + converted_dict["avail"] = file_size.size(usage_dict["avail"]).strip() + if "avail_pct" in usage_dict: + converted_dict["avail_pct"] = usage_dict["avail_pct"] + + if format_str: + s += format_str.format(con_str=con_str, **converted_dict) + elif ( + "total" in converted_dict + and "used_pct" in converted_dict + and "avail_pct" in converted_dict + and "avail" in converted_dict + ): + s += self.DEFAULT_FORMAT.format(con_str=con_str, **converted_dict) + else: + s += f"{converted_dict['used']} used" + + super().__init__(name, s) + + class SummaryLicenseLine(SummaryLine): + def __init__( + self, + license_dict: SummaryClusterLicenseAggDict, + ): + s = "" + + try: + # license_data was computed by uda + time_ = license_dict["latest_time"] + time_str = time_.isoformat() + + s += f"Latest ({time_str}): {file_size.size(license_dict['latest'])} Min: {file_size.size(license_dict['min'])} Max: {file_size.size(license_dict['max'])} Avg: {file_size.size(license_dict['avg'])}" + + except Exception: + # license_data was manually computed by asadm + s += f"Latest: {file_size.size(license_dict['latest'])}" + + super().__init__("License Usage", s) + + class NumberedList(list): + def __iter__(self) -> Iterator: + self.line_index = 1 + + def add_line_num(line) -> str: + s = " " * 3 + s += (str(self.line_index) + ".").ljust(3) + s += " " * 2 + s += str(line) + self.line_index += 1 + return s + + return (add_line_num(l) for l in super().__iter__()) + + def __str__(self) -> str: + return "\n".join(self) + @staticmethod - def _summary_cluster_list_view(cluster_dict: SummaryClusterDict, **ignore): - index = 1 - print( + def _summary_cluster_list_view( + cluster_dict: SummaryClusterDict, **ignore + ) -> list[str | NumberedList]: + lines: list[str | CliView.NumberedList] = [] + numbered_lines = CliView.NumberedList() + lines.append( "Cluster" + ( " (%s)" @@ -2091,7 +2190,7 @@ def _summary_cluster_list_view(cluster_dict: SummaryClusterDict, **ignore): else "" ) ) - print( + lines.append( "=======" + ( "==========================" @@ -2099,321 +2198,288 @@ def _summary_cluster_list_view(cluster_dict: SummaryClusterDict, **ignore): else "" ) ) - print() + lines.append("") if "cluster_name" in cluster_dict and len(cluster_dict["cluster_name"]) > 0: - print( - CliView.get_summary_line_prefix(index, "Cluster Name") - + ", ".join(cluster_dict["cluster_name"]) + numbered_lines.append( + CliView.SummaryLine( + "Cluster Name", ", ".join(cluster_dict["cluster_name"]) + ) ) - index += 1 - print( - CliView.get_summary_line_prefix(index, "Server Version") - + ", ".join(cluster_dict["server_version"]) - ) - index += 1 - - print( - CliView.get_summary_line_prefix(index, "OS Version") - + ", ".join(cluster_dict["os_version"]) + numbered_lines.append( + CliView.SummaryLine( + "Server Version", 
", ".join(cluster_dict["server_version"]) + ) ) - index += 1 - print( - CliView.get_summary_line_prefix(index, "Cluster Size") - + ", ".join([str(cs) for cs in cluster_dict["cluster_size"]]) + numbered_lines.append( + CliView.SummaryLine("OS Version", ", ".join(cluster_dict["os_version"])) ) - index += 1 - print( - CliView.get_summary_line_prefix(index, "Devices") - + "Total %d, per-node %d%s" - % ( - cluster_dict["device_count"], - cluster_dict["device_count_per_node"], - " (number differs across nodes)" - if not cluster_dict["device_count_same_across_nodes"] - else "", + numbered_lines.append( + CliView.SummaryLine( + "Cluster Size", + ", ".join([str(cs) for cs in cluster_dict["cluster_size"]]), ) ) - index += 1 - print( - CliView.get_summary_line_prefix(index, "Memory") - + "Total %s, %.2f%% used (%s), %.2f%% available (%s)" - % ( - file_size.size(cluster_dict["memory"]["total"]).strip(), - cluster_dict["memory"]["used_pct"], - file_size.size(cluster_dict["memory"]["used"]).strip(), - cluster_dict["memory"]["avail_pct"], - file_size.size(cluster_dict["memory"]["avail"]).strip(), + numbered_lines.append( + CliView.SummaryLine( + "Devices", + "Total %d, per-node %d%s" + % ( + cluster_dict["device_count"], + cluster_dict["device_count_per_node"], + " (number differs across nodes)" + if not cluster_dict["device_count_same_across_nodes"] + else "", + ), ) ) - index += 1 if "pmem_index" in cluster_dict: - print( - CliView.get_summary_line_prefix(index, "Pmem Index") - + "Total %s, %.2f%% used (%s), %.2f%% available (%s)" - % ( - file_size.size(cluster_dict["pmem_index"]["total"]).strip(), - cluster_dict["pmem_index"]["used_pct"], - file_size.size(cluster_dict["pmem_index"]["used"]).strip(), - cluster_dict["pmem_index"]["avail_pct"], - file_size.size(cluster_dict["pmem_index"]["avail"]).strip(), + numbered_lines.append( + CliView.SummaryUsageLine( + "Pmem Index", + cluster_dict["pmem_index"], ) ) - index += 1 if "flash_index" in cluster_dict: - print( - CliView.get_summary_line_prefix(index, "Flash Index") - + "Total %s, %.2f%% used (%s), %.2f%% available (%s)" - % ( - file_size.size(cluster_dict["flash_index"]["total"]).strip(), - cluster_dict["flash_index"]["used_pct"], - file_size.size(cluster_dict["flash_index"]["used"]).strip(), - cluster_dict["flash_index"]["avail_pct"], - file_size.size(cluster_dict["flash_index"]["avail"]).strip(), + numbered_lines.append( + CliView.SummaryUsageLine( + "Flash Index", + cluster_dict["flash_index"], ) ) - index += 1 - if "device" in cluster_dict: - print( - CliView.get_summary_line_prefix(index, "Device") - + "Total %s, %.2f%% used (%s), %.2f%% available contiguous space (%s)" - % ( - file_size.size(cluster_dict["device"]["total"]).strip(), - cluster_dict["device"]["used_pct"], - file_size.size(cluster_dict["device"]["used"]).strip(), - cluster_dict["device"]["avail_pct"], - file_size.size(cluster_dict["device"]["avail"]).strip(), + if "shmem_index" in cluster_dict: + numbered_lines.append( + CliView.SummaryUsageLine( + "Shmem Index", + cluster_dict["shmem_index"], ) ) - index += 1 - if "pmem" in cluster_dict: - print( - CliView.get_summary_line_prefix(index, "Pmem") - + "Total %s, %.2f%% used (%s), %.2f%% available contiguous space (%s)" - % ( - file_size.size(cluster_dict["pmem"]["total"]).strip(), - cluster_dict["pmem"]["used_pct"], - file_size.size(cluster_dict["pmem"]["used"]).strip(), - cluster_dict["pmem"]["avail_pct"], - file_size.size(cluster_dict["pmem"]["avail"]).strip(), + if "memory_data_and_indexes" in cluster_dict: + numbered_lines.append( 
+ CliView.SummaryUsageLine( + "Memory", + cluster_dict["memory_data_and_indexes"], + format_str=CliView.SummaryUsageLine.DEFAULT_FORMAT + + " includes data, pindex, and sindex", ) ) - index += 1 - data_summary = CliView.get_summary_line_prefix(index, "License Usage") + if "memory" in cluster_dict: + numbered_lines.append( + CliView.SummaryUsageLine( + "Memory", + cluster_dict["memory"], + contiguous=True, + ) + ) - try: - # license_data was computed by uda - time_ = cluster_dict["license_data"]["latest_time"] - time_.strftime("%H:%M:%S %m/%d/%Y") - - data_summary += "Latest (%s): %s Min: %s Max: %s Avg: %s" % ( - time_, - file_size.size(cluster_dict["license_data"]["latest"]), - file_size.size(cluster_dict["license_data"]["min"]), - file_size.size(cluster_dict["license_data"]["max"]), - file_size.size(cluster_dict["license_data"]["avg"]), + if "device" in cluster_dict: + numbered_lines.append( + CliView.SummaryUsageLine( + "Device", + cluster_dict["device"], + contiguous=True, + ) ) - except Exception: - # license_data was manually computed by asadm - data_summary += "Latest: %s" % ( - file_size.size(cluster_dict["license_data"]["latest"]) + + if "pmem" in cluster_dict: + numbered_lines.append( + CliView.SummaryUsageLine( + "Pmem", + cluster_dict["pmem"], + contiguous=True, + ) ) - print(data_summary) - index += 1 + if "license_data" in cluster_dict: + numbered_lines.append( + CliView.SummaryLicenseLine( + cluster_dict["license_data"], + ) + ) - print( - CliView.get_summary_line_prefix(index, "Active Namespaces") - + "%d of %d" % (cluster_dict["active_ns"], cluster_dict["ns_count"]) + numbered_lines.append( + CliView.SummaryLine( + "Active Namespaces", + "%d of %d" % (cluster_dict["active_ns"], cluster_dict["ns_count"]), + ) ) - index += 1 - print( - CliView.get_summary_line_prefix(index, "Active Features") - + ", ".join(sorted(cluster_dict["active_features"])) + numbered_lines.append( + CliView.SummaryLine( + "Active Features", ", ".join(sorted(cluster_dict["active_features"])) + ) ) - print("\n") + lines.append(numbered_lines) + lines.append("\n") + + return lines @staticmethod - def _summary_namespace_list_view(stats: SummaryNamespacesDict, **ignore): - print("Namespaces") - print("==========") - print() + def _summary_namespace_list_view( + stats: SummaryNamespacesDict, **ignore + ) -> list[str | NumberedList]: + lines: list[str | CliView.NumberedList] = [] + lines.append("Namespaces") + lines.append("==========") + lines.append("") for ns, ns_stats in stats.items(): - index = 1 - print( + ns_lines = CliView.NumberedList() + lines.append( " " + ( - "%s" % (terminal.fg_red() + ns + terminal.fg_clear()) + f"{terminal.fg_red()}{ns}{terminal.fg_clear()}" if ns_stats["migrations_in_progress"] else ns ) ) - print(" " + "=" * len(ns)) + lines.append(" " + "=" * len(ns)) - print( - CliView.get_summary_line_prefix(index, "Devices") - + "Total %d, per-node %d%s" - % ( - ns_stats["devices_total"], - ns_stats["devices_per_node"], - " (number differs across nodes)" - if not ns_stats["device_count_same_across_nodes"] - else "", + ns_lines.append( + CliView.SummaryLine( + "Devices", + "Total %d, per-node %d%s" + % ( + ns_stats["devices_total"], + ns_stats["devices_per_node"], + " (number differs across nodes)" + if not ns_stats["device_count_same_across_nodes"] + else "", + ), ) ) - index += 1 - print( - CliView.get_summary_line_prefix(index, "Memory") - + "Total %s, %.2f%% used (%s), %.2f%% available (%s)" - % ( - file_size.size(ns_stats["memory"]["total"]).strip(), - ns_stats["memory"]["used_pct"], 
- file_size.size(ns_stats["memory"]["used"]).strip(), - ns_stats["memory"]["avail_pct"], - file_size.size(ns_stats["memory"]["avail"]).strip(), + try: + ns_lines.append( + CliView.SummaryUsageLine("Pmem Index", ns_stats["pmem_index"]) ) - ) - index += 1 + except: + pass try: - print( - CliView.get_summary_line_prefix(index, "Pmem Index") - + "Total %s, %.2f%% used (%s), %.2f%% available (%s)" - % ( - file_size.size(ns_stats["pmem_index"]["total"]).strip(), - ns_stats["pmem_index"]["used_pct"], - file_size.size(ns_stats["pmem_index"]["used"]).strip(), - ns_stats["pmem_index"]["avail_pct"], - file_size.size(ns_stats["pmem_index"]["avail"]).strip(), - ) + ############ TODO finish using this function. Might need to change it to + # handle the "contiguous" space + ns_lines.append( + CliView.SummaryUsageLine("Flash Index", ns_stats["flash_index"]) ) - index += 1 except: pass try: - print( - CliView.get_summary_line_prefix(index, "Flash Index") - + "Total %s, %.2f%% used (%s), %.2f%% available (%s)" - % ( - file_size.size(ns_stats["flash_index"]["total"]).strip(), - ns_stats["flash_index"]["used_pct"], - file_size.size(ns_stats["flash_index"]["used"]).strip(), - ns_stats["flash_index"]["avail_pct"], - file_size.size(ns_stats["flash_index"]["avail"]).strip(), - ) + ns_lines.append( + CliView.SummaryUsageLine("Shmem Index", ns_stats["shmem_index"]) ) - index += 1 except: pass - if "device_total" in ns_stats: + if "memory_data_and_indexes" in ns_stats: + ns_lines.append( + CliView.SummaryUsageLine( + "Memory", + ns_stats["memory_data_and_indexes"], + format_str=CliView.SummaryUsageLine.DEFAULT_FORMAT + + " includes data, pindex, and sindex", + ) + ) + + if "memory" in ns_stats: + ns_lines.append( + CliView.SummaryUsageLine( + "Memory", ns_stats["memory"], contiguous=True + ) + ) + + if "device" in ns_stats: try: - print( - CliView.get_summary_line_prefix(index, "Device") - + "Total %s, %.2f%% used (%s), %.2f%% available contiguous space (%s)" - % ( - file_size.size(ns_stats["device"]["total"]).strip(), - ns_stats["device"]["used_pct"], - file_size.size(ns_stats["device"]["used"]).strip(), - ns_stats["device"]["avail_pct"], - file_size.size(ns_stats["device"]["avail"]).strip(), + ns_lines.append( + CliView.SummaryUsageLine( + "Device", ns_stats["device"], contiguous=True ) ) - index += 1 except: pass - elif "pmem_total" in ns_stats: + + if "pmem" in ns_stats: try: - print( - CliView.get_summary_line_prefix(index, "Pmem") - + "Total %s, %.2f%% used (%s), %.2f%% available contiguous space (%s)" - % ( - file_size.size(ns_stats["pmem"]["total"]).strip(), - ns_stats["pmem"]["used_pct"], - file_size.size(ns_stats["pmem"]["used"]).strip(), - ns_stats["pmem"]["avail_pct"], - file_size.size(ns_stats["pmem"]["avail"]).strip(), + ns_lines.append( + CliView.SummaryUsageLine( + "Pmem", ns_stats["pmem"], contiguous=True ) ) - index += 1 except: pass - license_usage = CliView.get_summary_line_prefix(index, "License Usage") - - try: - # license_data was computed by uda - time_ = ns_stats["license_data"]["latest_time"] - time = time_.strftime("%m/%d/%Y %Z %H:%M:%S") - license_usage += "Latest (%s): %s Min: %s Max: %s Avg: %s" % ( - time, - file_size.size(ns_stats["license_data"]["latest"]), - file_size.size(ns_stats["license_data"]["min"]), - file_size.size(ns_stats["license_data"]["max"]), - file_size.size(ns_stats["license_data"]["avg"]), - ) - except Exception: - # license_data was manually computed by asadm - license_usage += "Latest: %s" % ( - file_size.size(ns_stats["license_data"]["latest"]) + if 
"license_data" in ns_stats: + ns_lines.append( + CliView.SummaryLicenseLine( + ns_stats["license_data"], + ) ) - print(license_usage) - - print( - CliView.get_summary_line_prefix(index, "Replication Factor") - + "%s" % (",".join([str(rf) for rf in ns_stats["repl_factor"]])) + ns_lines.append( + CliView.SummaryLine( + "Replication Factor", + "%s" % (",".join([str(rf) for rf in ns_stats["repl_factor"]])), + ) ) - index += 1 if "cache_read_pct" in ns_stats: - print( - CliView.get_summary_line_prefix(index, "Post-Write-Queue Hit-Rate") - + "%s" - % (file_size.size(ns_stats["cache_read_pct"], file_size.si_float)) + ns_lines.append( + CliView.SummaryLine( + "Post-Write-Queue Hit-Rate", + "%s" + % ( + file_size.size( + ns_stats["cache_read_pct"], file_size.si_float + ) + ), + ) ) - index += 1 if "rack_aware" in ns_stats: - print( - CliView.get_summary_line_prefix(index, "Rack-aware") - + "%s" % (str(ns_stats["rack_aware"])) + ns_lines.append( + CliView.SummaryLine( + "Rack-aware", "%s" % (str(ns_stats["rack_aware"])) + ) ) - index += 1 - print( - CliView.get_summary_line_prefix(index, "Master Objects") - + "%s" - % (file_size.size(ns_stats["master_objects"], file_size.si_float)) + ns_lines.append( + CliView.SummaryLine( + "Master Objects", + "%s" + % (file_size.size(ns_stats["master_objects"], file_size.si_float)), + ) ) - index += 1 if "compression_ratio" in ns_stats: - print( - CliView.get_summary_line_prefix(index, "Compression-ratio") - + "%s" % (str(ns_stats["compression_ratio"])) + ns_lines.append( + CliView.SummaryLine( + "Compression-ratio", "%s" % (str(ns_stats["compression_ratio"])) + ) ) - index += 1 - print() + + lines.append(ns_lines) + lines.append("") + + return lines @staticmethod def print_summary(summary: SummaryDict, list_view=True): if list_view: - CliView._summary_cluster_list_view(summary["CLUSTER"]) - CliView._summary_namespace_list_view(summary["NAMESPACES"]) + lines: list[Any] = CliView._summary_cluster_list_view(summary["CLUSTER"]) + lines.extend(CliView._summary_namespace_list_view(summary["NAMESPACES"])) + + for line in lines: + print(line) else: CliView._summary_cluster_table_view(summary["CLUSTER"]) CliView._summary_namespace_table_view(summary["NAMESPACES"]) diff --git a/test/unit/utils/test_common.py b/test/unit/utils/test_common.py index 64e36178..8d5c63d8 100644 --- a/test/unit/utils/test_common.py +++ b/test/unit/utils/test_common.py @@ -51,542 +51,635 @@ def run_test_case( expected_summary_dict, summary_dict, "Input: " + str(namespace_stats) ) - def test_success_with_out_agent(self): - test_cases = [ - { - "ns_stats": {}, - "license_data": None, - "server_builds": {}, - "allow_unstable": False, - "exp_summary_dict": {}, - }, - { - "ns_stats": { - "foo": { - "1.1.1.1": { - "master_objects": 100, - "effective_replication_factor": 2, - "pmem_used_bytes": 99000, - } - } - }, - "license_data": None, - "server_builds": {"1.1.1.1": "5.0.0.0"}, - "allow_unstable": False, - "exp_summary_dict": { - "CLUSTER": {"license_data": {"latest": 46000}}, - "NAMESPACES": {"foo": {"license_data": {"latest": 46000}}}, - }, - }, - { - "ns_stats": { - "foo": { - "1.1.1.1": { - "master_objects": 100, - "effective_replication_factor": 2, - "pmem_used_bytes": 99000, - }, - "2.2.2.2": { - "master_objects": 100, - "effective_replication_factor": 0, # tie-breaker node - "pmem_used_bytes": 99000, - }, - } + @parameterized.expand( + [ + ( + { + "ns_stats": {}, + "license_data": None, + "server_builds": {}, + "allow_unstable": False, + "exp_summary_dict": {}, }, - "server_builds": {"1.1.1.1": 
"5.0.0.0"}, - "license_data": {}, - "allow_unstable": False, - "exp_summary_dict": { - "CLUSTER": { - "license_data": {"latest": int((99000 / 2) - (35 * 100))} - }, - "NAMESPACES": { + ), + ( + { + "ns_stats": { "foo": { - "license_data": {"latest": int((99000 / 2) - (35 * 100))} + "1.1.1.1": { + "master_objects": 100, + "effective_replication_factor": 2, + "pmem_used_bytes": 99000, + } } }, + "license_data": None, + "server_builds": {"1.1.1.1": "5.0.0.0"}, + "allow_unstable": False, + "exp_summary_dict": { + "CLUSTER": {"license_data": {"latest": 46000}}, + "NAMESPACES": {"foo": {"license_data": {"latest": 46000}}}, + }, }, - }, - { - "ns_stats": { - "foo": { - "1.1.1.1": { - "master_objects": 100, - "effective_replication_factor": 2, - "pmem_used_bytes": 99000, + ), + ( + { + "ns_stats": { + "foo": { + "1.1.1.1": { + "master_objects": 100, + "effective_replication_factor": 2, + "pmem_used_bytes": 99000, + }, + "2.2.2.2": { + "master_objects": 100, + "effective_replication_factor": 0, # tie-breaker node + "pmem_used_bytes": 99000, + }, + } + }, + "server_builds": {"1.1.1.1": "5.0.0.0"}, + "license_data": {}, + "allow_unstable": False, + "exp_summary_dict": { + "CLUSTER": { + "license_data": {"latest": int((99000 / 2) - (35 * 100))} }, - "2.2.2.2": { - "master_objects": 100, - "effective_replication_factor": 2, # tie-breaker node - "pmem_used_bytes": 99000, + "NAMESPACES": { + "foo": { + "license_data": { + "latest": int((99000 / 2) - (35 * 100)) + } + } }, - } + }, }, - "server_builds": {"1.1.1.1": "5.0.0.0", "2.2.2.2": "6.0.0.0"}, - "license_data": {}, - "allow_unstable": False, - "exp_summary_dict": { - "CLUSTER": { - "license_data": { - "latest": int( - ((99000 / 2) - (35 * 100)) + ((99000 / 2) - (39 * 100)) - ) + ), + ( + { + "ns_stats": { + "foo": { + "1.1.1.1": { + "master_objects": 100, + "effective_replication_factor": 2, + "pmem_used_bytes": 99000, + }, + "2.2.2.2": { + "master_objects": 100, + "effective_replication_factor": 2, # tie-breaker node + "pmem_used_bytes": 99000, + }, } }, - "NAMESPACES": { - "foo": { + "server_builds": {"1.1.1.1": "5.0.0.0", "2.2.2.2": "6.0.0.0"}, + "license_data": {}, + "allow_unstable": False, + "exp_summary_dict": { + "CLUSTER": { "license_data": { "latest": int( ((99000 / 2) - (35 * 100)) + ((99000 / 2) - (39 * 100)) ) } - } + }, + "NAMESPACES": { + "foo": { + "license_data": { + "latest": int( + ((99000 / 2) - (35 * 100)) + + ((99000 / 2) - (39 * 100)) + ) + } + } + }, }, }, - }, - { - "ns_stats": { - "foo": { - "1.1.1.1": { - "master_objects": 100, - "effective_replication_factor": 2, - "device_used_bytes": 7200, - } - } - }, - "server_builds": {"1.1.1.1": "5.0.0.0"}, - "license_data": None, - "allow_unstable": False, - "exp_summary_dict": { - "CLUSTER": {"license_data": {"latest": 100}}, - "NAMESPACES": {"foo": {"license_data": {"latest": 100}}}, - }, - }, - { - "ns_stats": { - "foo": { - "1.1.1.1": { - "master_objects": 100, - "effective_replication_factor": 2, - "pmem_used_bytes": 8000, - "memory_used_bytes": 800, + ), + ( + { + "ns_stats": { + "foo": { + "1.1.1.1": { + "master_objects": 100, + "effective_replication_factor": 2, + "device_used_bytes": 7200, + } } }, - "bar": { - "1.1.1.1": { - "master_objects": 50, - "effective_replication_factor": 3, - "device_used_bytes": 6000, - "memory_used_bytes": 3300, - } + "server_builds": {"1.1.1.1": "5.0.0.0"}, + "license_data": None, + "allow_unstable": False, + "exp_summary_dict": { + "CLUSTER": {"license_data": {"latest": 100}}, + "NAMESPACES": {"foo": {"license_data": {"latest": 100}}}, }, }, - 
"license_data": None, - "server_builds": {"1.1.1.1": "5.0.0.0"}, - "allow_unstable": False, - "exp_summary_dict": { - "CLUSTER": {"license_data": {"latest": 500 + 250}}, - "NAMESPACES": { - "foo": {"license_data": {"latest": 500}}, - "bar": {"license_data": {"latest": 250}}, + ), + ( + { + "ns_stats": { + "foo": { + "1.1.1.1": { + "master_objects": 100, + "effective_replication_factor": 2, + "pmem_used_bytes": 8000, + "memory_used_bytes": 800, + } + }, + "bar": { + "1.1.1.1": { + "master_objects": 50, + "effective_replication_factor": 3, + "device_used_bytes": 6000, + "memory_used_bytes": 3300, + } + }, + }, + "license_data": None, + "server_builds": {"1.1.1.1": "5.0.0.0"}, + "allow_unstable": False, + "exp_summary_dict": { + "CLUSTER": {"license_data": {"latest": 500 + 250}}, + "NAMESPACES": { + "foo": {"license_data": {"latest": 500}}, + "bar": {"license_data": {"latest": 250}}, + }, }, }, - }, - { - "ns_stats": { - "foo": { - "1.1.1.1": { - "master_objects": 100, - "effective_replication_factor": 2, - "device_used_bytes": 7200, - "memory_used_bytes": 800, + ), + ( + { + "ns_stats": { + "foo": { + "1.1.1.1": { + "master_objects": 100, + "effective_replication_factor": 2, + "device_used_bytes": 7200, + "memory_used_bytes": 800, + }, + "2.2.2.2": { + "master_objects": 10, + "effective_replication_factor": 2, + "device_used_bytes": 3200, + "memory_used_bytes": 10000, + }, }, - "2.2.2.2": { - "master_objects": 10, - "effective_replication_factor": 2, - "device_used_bytes": 3200, - "memory_used_bytes": 10000, + "bar": { + "1.1.1.1": { + "master_objects": 50, + "effective_replication_factor": 3, + "pmem_used_bytes": 50000, + }, + "2.2.2.2": { + "master_objects": 10, + "effective_replication_factor": 3, + "pmem_used_bytes": 10000, + }, }, }, - "bar": { - "1.1.1.1": { - "master_objects": 50, - "effective_replication_factor": 3, - "pmem_used_bytes": 50000, + "license_data": None, + "allow_unstable": False, + "server_builds": {"1.1.1.1": "5.0.0.0", "2.2.2.2": "5.0.0.0"}, + "exp_summary_dict": { + "CLUSTER": { + "license_data": { + "latest": int( + ((7200 + 3200) / 2) # foo + - (110 * 35) + + ((50000 + 10000) / 3) # bar + - (35 * 60) + ) + }, }, - "2.2.2.2": { - "master_objects": 10, - "effective_replication_factor": 3, - "pmem_used_bytes": 10000, + "NAMESPACES": { + "foo": { + "license_data": { + "latest": int(((7200 + 3200) / 2) - (110 * 35)) + } + }, + "bar": { + "license_data": { + "latest": int(((50000 + 10000) / 3) - (35 * 60)) + } + }, }, }, }, - "license_data": None, - "allow_unstable": False, - "server_builds": {"1.1.1.1": "5.0.0.0", "2.2.2.2": "5.0.0.0"}, - "exp_summary_dict": { - "CLUSTER": { - "license_data": { - "latest": int( - ((7200 + 3200) / 2) - - (110 * 35) - + ((50000 + 10000) / 3) - - (35 * 60) - ) - }, - }, - "NAMESPACES": { + ), + ( + { + "ns_stats": { "foo": { - "license_data": { - "latest": int(((7200 + 3200) / 2) - (110 * 35)) - } + "1.1.1.1": { + "master_objects": 100, + "effective_replication_factor": 2, + "data_used_bytes": 7200, + "data_compression_ratio": 0.5, + }, + "2.2.2.2": { + "master_objects": 10, + "effective_replication_factor": 2, + "data_used_bytes": 3200, + "data_compression_ratio": 0.2, + }, }, "bar": { + "1.1.1.1": { + "master_objects": 50, + "effective_replication_factor": 3, + "data_used_bytes": 50000, + }, + "2.2.2.2": { + "master_objects": 10, + "effective_replication_factor": 3, + "data_used_bytes": 10000, + }, + }, + }, + "license_data": None, + "allow_unstable": False, + "server_builds": {"1.1.1.1": "5.0.0.0", "2.2.2.2": "5.0.0.0"}, + 
"exp_summary_dict": { + "CLUSTER": { "license_data": { - "latest": int(((50000 + 10000) / 3) - (35 * 60)) - } + "latest": int( + ((7200 / 0.5 + 3200 / 0.2) / 2) # foo + - (110 * 35) + + ((50000 + 10000) / 3) # bar + - (35 * 60) + ) + }, + }, + "NAMESPACES": { + "foo": { + "license_data": { + "latest": int( + ((7200 / 0.5 + 3200 / 0.2) / 2) - (110 * 35) + ) + } + }, + "bar": { + "license_data": { + "latest": int(((50000 + 10000) / 3) - (35 * 60)) + } + }, }, }, }, - }, + ), ] + ) + # Try to parameterize these tests and add a test using "data_used_bytes" + def test_success_with_out_agent(self, tc): + self.run_test_case( + tc["ns_stats"], + tc["server_builds"], + tc["license_data"], + tc["allow_unstable"], + tc["exp_summary_dict"], + ) - for tc in test_cases: - self.run_test_case( - tc["ns_stats"], - tc["server_builds"], - tc["license_data"], - tc["allow_unstable"], - tc["exp_summary_dict"], - ) - - def test_success_with_agent(self): - test_cases = [ - { - "ns_stats": {"foo": {}}, - "license_data": { - "license_usage": { - "count": 1, - "entries": [ - { - "time": "2022-04-07T22:59:47", - "unique_data_bytes": 500, - "level": "info", - "cluster_stable": True, - "namespaces": {"foo": {"unique_data_bytes": 100}}, + @parameterized.expand( + [ + ( + { + "ns_stats": {"foo": {}}, + "license_data": { + "license_usage": { + "count": 1, + "entries": [ + { + "time": "2022-04-07T22:59:47", + "unique_data_bytes": 500, + "level": "info", + "cluster_stable": True, + "namespaces": {"foo": {"unique_data_bytes": 100}}, + } + ], + }, + }, + "allow_unstable": False, + "exp_summary_dict": { + "CLUSTER": { + "license_data": { + "latest_time": datetime.fromisoformat( + "2022-04-07T22:59:47" + ), + "latest": 500, + "min": 500, + "max": 500, + "avg": 500, + } + }, + "NAMESPACES": { + "foo": { + "license_data": { + "latest_time": datetime.fromisoformat( + "2022-04-07T22:59:47" + ), + "latest": 100, + "min": 100, + "max": 100, + "avg": 100, + } } - ], + }, }, }, - "allow_unstable": False, - "exp_summary_dict": { - "CLUSTER": { - "license_data": { - "latest_time": datetime.fromisoformat( - "2022-04-07T22:59:47" - ), - "latest": 500, - "min": 500, - "max": 500, - "avg": 500, + ), + ( + { + "ns_stats": {"foo": {}}, + "license_data": { + "license_usage": { + "count": 1, + "entries": [ + { + "time": "2022-04-07T22:59:47", + "unique_data_bytes": 500, + "level": "info", + "cluster_stable": True, + "namespaces": {"foo": {"unique_data_bytes": 100}}, + }, + { + "latest_time": "2022-04-07T22:59:47", + "unique_data_bytes": 0, + "level": "error", + "namespaces": {"foo": {"unique_data_bytes": 100}}, + }, + ], } }, - "NAMESPACES": { - "foo": { + "allow_unstable": False, + "exp_summary_dict": { + "CLUSTER": { "license_data": { "latest_time": datetime.fromisoformat( "2022-04-07T22:59:47" ), - "latest": 100, - "min": 100, - "max": 100, - "avg": 100, + "latest": 500, + "min": 500, + "max": 500, + "avg": 500, } - } + }, + "NAMESPACES": { + "foo": { + "license_data": { + "latest_time": datetime.fromisoformat( + "2022-04-07T22:59:47" + ), + "latest": 100, + "min": 100, + "max": 100, + "avg": 100, + } + } + }, }, }, - }, - { - "ns_stats": {"foo": {}}, - "license_data": { - "license_usage": { - "count": 1, - "entries": [ - { - "time": "2022-04-07T22:59:47", - "unique_data_bytes": 500, - "level": "info", - "cluster_stable": True, - "namespaces": {"foo": {"unique_data_bytes": 100}}, - }, - { - "latest_time": "2022-04-07T22:59:47", - "unique_data_bytes": 0, - "level": "error", - "namespaces": {"foo": {"unique_data_bytes": 100}}, - }, - ], - } - 
}, - "allow_unstable": False, - "exp_summary_dict": { - "CLUSTER": { - "license_data": { - "latest_time": datetime.fromisoformat( - "2022-04-07T22:59:47" - ), - "latest": 500, - "min": 500, - "max": 500, - "avg": 500, + ), + ( + { + "ns_stats": {"foo": {}}, + "license_data": { + "license_usage": { + "count": 2, + "entries": [ + { + "time": "2022-04-07T22:58:47", + "unique_data_bytes": 500, + "level": "info", + "cluster_stable": True, + "namespaces": {"foo": {"unique_data_bytes": 1000}}, + }, + { + "time": "2022-04-07T22:59:47", + "unique_data_bytes": 100, + "level": "info", + "cluster_stable": True, + "namespaces": {"foo": {"unique_data_bytes": 500}}, + }, + {"unique_data_bytes": 0, "level": "error"}, + ], } }, - "NAMESPACES": { - "foo": { + "allow_unstable": False, + "exp_summary_dict": { + "CLUSTER": { "license_data": { "latest_time": datetime.fromisoformat( "2022-04-07T22:59:47" ), "latest": 100, "min": 100, - "max": 100, - "avg": 100, + "max": 500, + "avg": 300, } - } + }, + "NAMESPACES": { + "foo": { + "license_data": { + "latest_time": datetime.fromisoformat( + "2022-04-07T22:59:47" + ), + "latest": 500, + "min": 500, + "max": 1000, + "avg": 750, + } + } + }, }, }, - }, - { - "ns_stats": {"foo": {}}, - "license_data": { - "license_usage": { - "count": 2, - "entries": [ - { - "time": "2022-04-07T22:58:47", - "unique_data_bytes": 500, - "level": "info", - "cluster_stable": True, - "namespaces": {"foo": {"unique_data_bytes": 1000}}, - }, - { - "time": "2022-04-07T22:59:47", - "unique_data_bytes": 100, - "level": "info", - "cluster_stable": True, - "namespaces": {"foo": {"unique_data_bytes": 500}}, - }, - {"unique_data_bytes": 0, "level": "error"}, - ], - } - }, - "allow_unstable": False, - "exp_summary_dict": { - "CLUSTER": { - "license_data": { - "latest_time": datetime.fromisoformat( - "2022-04-07T22:59:47" - ), - "latest": 100, - "min": 100, - "max": 500, - "avg": 300, - } - }, - "NAMESPACES": { + ), + ( + { + "ns_stats": { "foo": { - "license_data": { - "latest_time": datetime.fromisoformat( - "2022-04-07T22:59:47" - ), - "latest": 500, - "min": 500, - "max": 1000, - "avg": 750, + "1.1.1.1": { + "master_objects": 100, + "effective_replication_factor": 2, + "pmem_used_bytes": 99000, } } }, - }, - }, - { - "ns_stats": { - "foo": { - "1.1.1.1": { - "master_objects": 100, - "effective_replication_factor": 2, - "pmem_used_bytes": 99000, + "license_data": { + "license_usage": { + "count": 3, + "entries": [ + {"unique_data_bytes": 500, "level": "error"}, + {"unique_data_bytes": 100, "level": "error"}, + {"unique_data_bytes": 0, "level": "error"}, + ], } - } - }, - "license_data": { - "license_usage": { - "count": 3, - "entries": [ - {"unique_data_bytes": 500, "level": "error"}, - {"unique_data_bytes": 100, "level": "error"}, - {"unique_data_bytes": 0, "level": "error"}, - ], - } + }, + "allow_unstable": False, + "exp_summary_dict": { + "CLUSTER": {"license_data": {"latest": 46000}}, + "NAMESPACES": {"foo": {"license_data": {"latest": 46000}}}, + }, }, - "allow_unstable": False, - "exp_summary_dict": { - "CLUSTER": {"license_data": {"latest": 46000}}, - "NAMESPACES": {"foo": {"license_data": {"latest": 46000}}}, - }, - }, - { - "ns_stats": { - "foo": { - "1.1.1.1": { - "master_objects": 100, - "effective_replication_factor": 2, - "pmem_used_bytes": 99000, + ), + ( + { + "ns_stats": { + "foo": { + "1.1.1.1": { + "master_objects": 100, + "effective_replication_factor": 2, + "pmem_used_bytes": 99000, + } } - } - }, - "license_data": { - "license_usage": { - "count": 2, - "entries": [ - { - 
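+                                # Both entries carry cluster_stable: False and
+                                # allow_unstable is off, so the agent data is
+                                # discarded and the manual estimate (46000)
+                                # is expected instead.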
"time": "2022-04-07T22:58:47", - "unique_data_bytes": 500, - "level": "info", - "cluster_stable": False, - "namespaces": {"foo": {"unique_data_bytes": 1000}}, - }, - { - "time": "2022-04-07T22:59:47", - "unique_data_bytes": 100, - "level": "info", - "cluster_stable": False, - "namespaces": {"foo": {"unique_data_bytes": 500}}, - }, - {"unique_data_bytes": 0, "level": "error"}, - ], - } - }, - "allow_unstable": False, - "exp_summary_dict": { - "CLUSTER": {"license_data": {"latest": 46000}}, - "NAMESPACES": {"foo": {"license_data": {"latest": 46000}}}, - }, - }, - { - "ns_stats": {"foo": {}}, - "license_data": { - "license_usage": { - "count": 2, - "entries": [ - { - "time": "2022-04-07T22:58:47", - "unique_data_bytes": 500, - "level": "info", - "cluster_stable": False, - "namespaces": {"foo": {"unique_data_bytes": 1000}}, - }, - { - "time": "2022-04-07T22:59:47", - "unique_data_bytes": 100, - "level": "info", - "cluster_stable": False, - "namespaces": {"foo": {"unique_data_bytes": 500}}, - }, - {"unique_data_bytes": 0, "level": "error"}, - ], - } + }, + "license_data": { + "license_usage": { + "count": 2, + "entries": [ + { + "time": "2022-04-07T22:58:47", + "unique_data_bytes": 500, + "level": "info", + "cluster_stable": False, + "namespaces": {"foo": {"unique_data_bytes": 1000}}, + }, + { + "time": "2022-04-07T22:59:47", + "unique_data_bytes": 100, + "level": "info", + "cluster_stable": False, + "namespaces": {"foo": {"unique_data_bytes": 500}}, + }, + {"unique_data_bytes": 0, "level": "error"}, + ], + } + }, + "allow_unstable": False, + "exp_summary_dict": { + "CLUSTER": {"license_data": {"latest": 46000}}, + "NAMESPACES": {"foo": {"license_data": {"latest": 46000}}}, + }, }, - "allow_unstable": True, - "exp_summary_dict": { - "CLUSTER": { - "license_data": { - "latest_time": datetime.fromisoformat( - "2022-04-07T22:59:47" - ), - "latest": 100, - "min": 100, - "max": 500, - "avg": 300, + ), + ( + { + "ns_stats": {"foo": {}}, + "license_data": { + "license_usage": { + "count": 2, + "entries": [ + { + "time": "2022-04-07T22:58:47", + "unique_data_bytes": 500, + "level": "info", + "cluster_stable": False, + "namespaces": {"foo": {"unique_data_bytes": 1000}}, + }, + { + "time": "2022-04-07T22:59:47", + "unique_data_bytes": 100, + "level": "info", + "cluster_stable": False, + "namespaces": {"foo": {"unique_data_bytes": 500}}, + }, + {"unique_data_bytes": 0, "level": "error"}, + ], } }, - "NAMESPACES": { - "foo": { + "allow_unstable": True, + "exp_summary_dict": { + "CLUSTER": { "license_data": { "latest_time": datetime.fromisoformat( "2022-04-07T22:59:47" ), - "latest": 500, - "min": 500, - "max": 1000, - "avg": 750, + "latest": 100, + "min": 100, + "max": 500, + "avg": 300, } - } + }, + "NAMESPACES": { + "foo": { + "license_data": { + "latest_time": datetime.fromisoformat( + "2022-04-07T22:59:47" + ), + "latest": 500, + "min": 500, + "max": 1000, + "avg": 750, + } + } + }, }, }, - }, - { - "ns_stats": {"foo": {}}, - "license_data": { - "license_usage": { - "count": 2, - "entries": [ - { - "time": "2022-04-07T22:58:47", - "unique_data_bytes": 500, - "level": "info", - "cluster_stable": True, - "namespaces": {"foo": {"unique_data_bytes": 1000}}, - }, - { - "time": "2022-04-07T22:59:47", - "unique_data_bytes": 100, - "level": "info", - "cluster_stable": False, - "namespaces": {"foo": {"unique_data_bytes": 500}}, - }, - {"unique_data_bytes": 0, "level": "error"}, - ], - } - }, - "allow_unstable": False, - "exp_summary_dict": { - "CLUSTER": { - "license_data": { - "latest_time": 
datetime.fromisoformat( - "2022-04-07T22:58:47" - ), - "latest": 500, - "min": 500, - "max": 500, - "avg": 500, + ), + ( + { + "ns_stats": {"foo": {}}, + "license_data": { + "license_usage": { + "count": 2, + "entries": [ + { + "time": "2022-04-07T22:58:47", + "unique_data_bytes": 500, + "level": "info", + "cluster_stable": True, + "namespaces": {"foo": {"unique_data_bytes": 1000}}, + }, + { + "time": "2022-04-07T22:59:47", + "unique_data_bytes": 100, + "level": "info", + "cluster_stable": False, + "namespaces": {"foo": {"unique_data_bytes": 500}}, + }, + {"unique_data_bytes": 0, "level": "error"}, + ], } }, - "NAMESPACES": { - "foo": { + "allow_unstable": False, + "exp_summary_dict": { + "CLUSTER": { "license_data": { "latest_time": datetime.fromisoformat( "2022-04-07T22:58:47" ), - "latest": 1000, - "min": 1000, - "max": 1000, - "avg": 1000, + "latest": 500, + "min": 500, + "max": 500, + "avg": 500, } - } + }, + "NAMESPACES": { + "foo": { + "license_data": { + "latest_time": datetime.fromisoformat( + "2022-04-07T22:58:47" + ), + "latest": 1000, + "min": 1000, + "max": 1000, + "avg": 1000, + } + } + }, }, }, - }, + ), ] - - for tc in test_cases: - self.run_test_case( - tc["ns_stats"], - {"1.1.1.1": "5.0.0.0"}, - tc["license_data"], - tc["allow_unstable"], - tc["exp_summary_dict"], - ) + ) + def test_success_with_agent(self, tc): + self.run_test_case( + tc["ns_stats"], + {"1.1.1.1": "5.0.0.0"}, + tc["license_data"], + tc["allow_unstable"], + tc["exp_summary_dict"], + ) class CreateStopWritesSummaryTests(asynctest.TestCase): @@ -812,6 +905,7 @@ def create_tc( } }, ), + # stop_writes triggered by device_available_pct create_tc( ns_stats={ "1.1.1.1": { @@ -837,6 +931,7 @@ def create_tc( } }, ), + # stop_writes not triggered by pmem_available_pct create_tc( ns_stats={ "1.1.1.1": { @@ -1076,24 +1171,25 @@ def create_tc( } }, ), - # stop_writes is not triggered by memory_used_bytes + # stop_writes not triggered by data_used_bytes create_tc( ns_stats={ "1.1.1.1": { "ns1": { "stop_writes": "true", - "memory_used_bytes": "10", + "data_used_bytes": "10", + "data_total_bytes": "100", } }, }, ns_config={ - "1.1.1.1": {"ns1": {"stop-writes-pct": "90", "memory-size": "100"}}, + "1.1.1.1": {"ns1": {"storage-engine.stop-writes-used-pct": "90"}}, }, expected={ "1.1.1.1": { - ("ns1", None, "memory_used_bytes"): { - "metric": "memory_used_bytes", - "config": "stop-writes-pct", + ("ns1", None, "data_used_bytes"): { + "metric": "data_used_bytes", + "config": "storage-engine.stop-writes-used-pct", "stop_writes": False, "metric_usage": 10, "metric_threshold": 90, @@ -1102,24 +1198,77 @@ def create_tc( } }, ), - # stop_writes is triggered by memory_used_bytes + # stop_writes is triggered by data_used_bytes create_tc( ns_stats={ "1.1.1.1": { "ns1": { "stop_writes": "true", - "memory_used_bytes": "90", + "data_used_bytes": "90", + "data_total_bytes": "100", } }, }, ns_config={ - "1.1.1.1": {"ns1": {"stop-writes-pct": "90", "memory-size": "100"}}, + "1.1.1.1": {"ns1": {"storage-engine.stop-writes-used-pct": "90"}}, }, expected={ "1.1.1.1": { - ("ns1", None, "memory_used_bytes"): { - "metric": "memory_used_bytes", - "config": "stop-writes-pct", + ("ns1", None, "data_used_bytes"): { + "metric": "data_used_bytes", + "config": "storage-engine.stop-writes-used-pct", + "stop_writes": True, + "metric_usage": 90, + "metric_threshold": 90, + "namespace": "ns1", + }, + } + }, + ), + # stop_writes is not triggered by memory_used_bytes + create_tc( + ns_stats={ + "1.1.1.1": { + "ns1": { + "stop_writes": "true", + 
"memory_used_bytes": "10", + } + }, + }, + ns_config={ + "1.1.1.1": {"ns1": {"stop-writes-pct": "90", "memory-size": "100"}}, + }, + expected={ + "1.1.1.1": { + ("ns1", None, "memory_used_bytes"): { + "metric": "memory_used_bytes", + "config": "stop-writes-pct", + "stop_writes": False, + "metric_usage": 10, + "metric_threshold": 90, + "namespace": "ns1", + }, + } + }, + ), + # stop_writes is triggered by memory_used_bytes + create_tc( + ns_stats={ + "1.1.1.1": { + "ns1": { + "stop_writes": "true", + "memory_used_bytes": "90", + } + }, + }, + ns_config={ + "1.1.1.1": {"ns1": {"stop-writes-pct": "90", "memory-size": "100"}}, + }, + expected={ + "1.1.1.1": { + ("ns1", None, "memory_used_bytes"): { + "metric": "memory_used_bytes", + "config": "stop-writes-pct", "stop_writes": True, "metric_usage": 90, "metric_threshold": 90, @@ -1134,7 +1283,7 @@ def create_tc( "1.1.1.1": { ("ns1", "set1"): { "memory_data_bytes": "10", - "device_data_bytes": "0", + "device_data_bytes": "100000", } } }, @@ -1159,6 +1308,7 @@ def create_tc( "1.1.1.1": { ("ns1", "set1"): { "memory_data_bytes": "100", + "device_data_bytes": "100000", } } }, @@ -1226,6 +1376,55 @@ def create_tc( } }, ), + # stop_writes is not triggered by set.data_used_bytes + create_tc( + set_stats={ + "1.1.1.1": { + ("ns1", "set1"): { + "data_used_bytes": "10", + "device_data_bytes": "0", + } + } + }, + set_config={"1.1.1.1": {("ns1", "set1"): {"stop-writes-size": "100"}}}, + expected={ + "1.1.1.1": { + ("ns1", "set1", "data_used_bytes"): { + "metric": "data_used_bytes", + "config": "stop-writes-size", + "stop_writes": False, + "metric_usage": 10, + "metric_threshold": 100, + "namespace": "ns1", + "set": "set1", + }, + } + }, + ), + # stop_writes is triggered by set.data_used_bytes + create_tc( + set_stats={ + "1.1.1.1": { + ("ns1", "set1"): { + "data_used_bytes": "100", + } + } + }, + set_config={"1.1.1.1": {("ns1", "set1"): {"stop-writes-size": "100"}}}, + expected={ + "1.1.1.1": { + ("ns1", "set1", "data_used_bytes"): { + "metric": "data_used_bytes", + "config": "stop-writes-size", + "stop_writes": True, + "metric_usage": 100, + "metric_threshold": 100, + "namespace": "ns1", + "set": "set1", + }, + } + }, + ), # stop_writes is not triggered by set.objects create_tc( set_stats={ @@ -1276,7 +1475,7 @@ def create_tc( ), ], ) - def test_summary_creation( + def test_stop_writes_summary_creation( self, service_stats, ns_stats, ns_config, set_stats, set_config, expected ): self.assertDictEqual( @@ -1316,3 +1515,825 @@ def test_active_stop_writes(self, stop_writes_dict, expected): common.active_stop_writes(stop_writes_dict), expected, ) + + +class CreateSummaryTests(unittest.TestCase): + maxDiff = None + + @staticmethod + def create_tc( + service_stats={}, + ns_stats={}, + xdr_dc_stats={}, + metadata={}, + license_allow_unstable=False, + service_configs={}, + ns_configs={}, + security_configs={}, + license_data_usage={}, + expected={}, + ): + namespaces = list(expected.get("NAMESPACES", {}).keys()) + hosts = ns_stats.keys() + builds = metadata.setdefault("server_build", {}) + + for host in hosts: + builds.setdefault(host, "7.0.0") + service_stats.setdefault(host, {}) + for ns in namespaces: + ns_stats.setdefault(host, {}).setdefault(ns, {}) + ns_configs.setdefault(host, {}).setdefault(ns, {}) + + init_expected = common._initialize_summary_output(namespaces) + expected = util.deep_merge_dicts(init_expected, expected) + + return ( + service_stats, + ns_stats, + xdr_dc_stats, + metadata, + license_allow_unstable, + service_configs, + ns_configs, + 
security_configs, + license_data_usage, + init_expected, + ) + + @parameterized.expand( + [ + # Test Devices Counts + create_tc( + ns_stats={ + "1.1.1.1": { + "test": { + "storage-engine.device[0]": 0, + "replication-factor": 1, + }, + "bar": { + "storage-engine.device": 0, + "storage-engine.file[0]": 0, + "storage-engine.file": 0, + "replication-factor": 2, + }, + }, + "2.2.2.2": { + "test": { + "storage-engine.device[0]": 0, + "replication-factor": 1, + }, + "bar": { + "storage-engine.device": 0, + "storage-engine.file[0]": 0, + "storage-engine.file": 0, + "replication-factor": 2, + }, + }, + }, + expected={ + "CLUSTER": { + "device_count": 8, + "device_count_per_node": 4, + "ns_count": 2, + }, + "NAMESPACES": { + "test": { + "devices_total": 2, + "devices_per_node": 1, + "repl_factor": [1], + }, + "bar": { + "devices_total": 6, + "devices_per_node": 3, + "repl_factor": [2], + }, + }, + }, + ), + # Test Pre 7.0 Memory Usage. Shmem is not displayed in this case by choice + create_tc( + ns_stats={ + "1.1.1.1": { + "test": { + "memory_used_bytes": 1024, + }, + }, + "2.2.2.2": { + "bar": { + "memory_used_bytes": 1024, + }, + }, + }, + ns_configs={ + "1.1.1.1": { + "test": { + "storage-engine": "memory", + "memory-size": 2048, + "index-type": "shmem", + "replication-factor": 1, + }, + }, + "2.2.2.2": { + "bar": { + "storage-engine": "memory", + "memory-size": 2048, + "index-type": "shmem", + "replication-factor": 1, + }, + }, + }, + expected={ + "CLUSTER": { + "active_features": ["Index-on-shmem"], + "ns_count": 2, + "license_data": { + "latest": 0 + }, # memory_data_used_bytes and memory_index_used_bytes are used for license calculation + "memory_data_and_indexes": { + "total": 4096, + "used": 2048, + "used_pct": 50.0, + "avail": 2048, + "avail_pct": 50.0, + }, + }, + "NAMESPACES": { + "test": { + "memory_data_and_indexes": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 0}, + }, + "bar": { + "memory_data_and_indexes": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 0}, + }, + }, + }, + ), + # Test Pre 7.0 Pmem/Pmem-index usage + create_tc( + ns_stats={ + "1.1.1.1": { + "test": { + "memory_used_bytes": 1024, + "index_pmem_used_bytes": 512, + "pmem_used_bytes": 1024, + "pmem_total_bytes": 2048, + "pmem_available_pct": 50, + }, + }, + "2.2.2.2": { + "bar": { + "memory_used_bytes": 1024, + "index_pmem_used_bytes": 512, + "pmem_used_bytes": 1024, + "pmem_total_bytes": 2048, + "pmem_available_pct": 50, + }, + }, + }, + ns_configs={ + "1.1.1.1": { + "test": { + "memory-size": 2048, + "storage-engine": "pmem", + "index-type": "pmem", + "index-type.mounts-size-limit": 1024, + "replication-factor": 1, + }, + }, + "2.2.2.2": { + "bar": { + "memory-size": 2048, + "storage-engine": "pmem", + "index-type": "pmem", + "index-type.mounts-size-limit": 1024, + "replication-factor": 1, + }, + }, + }, + expected={ + "CLUSTER": { + "active_features": ["Index-on-pmem"], + "ns_count": 2, + "license_data": {"latest": 2048}, + "memory_data_and_indexes": { + "total": 4096, + "used": 2048, + "used_pct": 50.0, + "avail": 2048, + "avail_pct": 50.0, + }, + "pmem_index": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024, + "avail_pct": 50.0, + }, + "pmem": { + "total": 4096, + "used": 2048, + "used_pct": 50.0, + "avail": 2048.0, + "avail_pct": 50.0, + }, + }, + "NAMESPACES": { + "test": { + 
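+                            # Per-namespace expectations: pmem_index totals
+                            # derive from index-type.mounts-size-limit (1024),
+                            # pmem totals from pmem_total_bytes (2048), and
+                            # license data from pmem_used_bytes / repl factor.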
"index_type": "pmem", + "memory_data_and_indexes": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024, + "avail_pct": 50.0, + }, + "pmem_index": { + "total": 1024, + "used": 512, + "used_pct": 50.0, + "avail": 512, + "avail_pct": 50.0, + }, + "pmem": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024.0, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 1024}, + }, + "bar": { + "index_type": "pmem", + "memory_data_and_indexes": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024, + "avail_pct": 50.0, + }, + "pmem_index": { + "total": 1024, + "used": 512, + "used_pct": 50.0, + "avail": 512, + "avail_pct": 50.0, + }, + "pmem": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024.0, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 1024}, + }, + }, + }, + ), + # Test Pre 7.0 Device/Flash-index Usage + create_tc( + ns_stats={ + "1.1.1.1": { + "test": { + "index_flash_used_bytes": 512, + "device_used_bytes": 1024, + "device_total_bytes": 2048, + "device_available_pct": 50, + }, + }, + "2.2.2.2": { + "bar": { + "index_flash_used_bytes": 512, + "device_used_bytes": 1024, + "device_total_bytes": 2048, + "device_available_pct": 50, + }, + }, + }, + ns_configs={ + "1.1.1.1": { + "test": { + "storage-engine": "device", + "index-type": "flash", + "index-type.mounts-size-limit": 1024, + "replication-factor": 1, + }, + }, + "2.2.2.2": { + "bar": { + "storage-engine": "device", + "index-type": "flash", + "index-type.mounts-size-limit": 1024, + "replication-factor": 1, + }, + }, + }, + expected={ + "CLUSTER": { + "active_features": ["Index-on-flash"], + "ns_count": 2, + "license_data": {"latest": 2048}, + "flash_index": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024, + "avail_pct": 50.0, + }, + "device": { + "total": 4096, + "used": 2048, + "used_pct": 50.0, + "avail": 2048.0, + "avail_pct": 50.0, + }, + }, + "NAMESPACES": { + "test": { + "index_type": "flash", + "flash_index": { + "total": 1024, + "used": 512, + "used_pct": 50.0, + "avail": 512, + "avail_pct": 50.0, + }, + "device": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024.0, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 1024}, + }, + "bar": { + "index_type": "flash", + "flash_index": { + "total": 1024, + "used": 512, + "used_pct": 50.0, + "avail": 512, + "avail_pct": 50.0, + }, + "device": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024.0, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 1024}, + }, + }, + }, + ), + # Test New 7.0 Memory/Shmem-index usage + create_tc( + ns_stats={ + "1.1.1.1": { + "test": { + "index_used_bytes": 512, + "data_used_bytes": 1024, + "data_total_bytes": 2048, + "data_avail_pct": 50, + }, + }, + "2.2.2.2": { + "bar": { + "index_used_bytes": 512, + "data_used_bytes": 1024, + "data_total_bytes": 2048, + "data_avail_pct": 50, + }, + }, + }, + ns_configs={ + "1.1.1.1": { + "test": { + "storage-engine": "memory", + "index-type": "shmem", # shmem index has no mounts-budget + "replication-factor": 1, + }, + }, + "2.2.2.2": { + "bar": { + "storage-engine": "memory", + "index-type": "shmem", # shmem index has no mounts-budget + "replication-factor": 1, + }, + }, + }, + expected={ + "CLUSTER": { + "active_features": ["Index-on-shmem"], + "ns_count": 2, + "license_data": {"latest": 2048}, + "shmem_index": { + "used": 1024, + }, + "memory": { + "total": 4096, + "used": 2048, + 
"used_pct": 50.0, + "avail": 2048.0, + "avail_pct": 50.0, + }, + }, + "NAMESPACES": { + "test": { + "index_type": "shmem", + "shmem_index": { + "used": 512, + }, + "memory": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024.0, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 1024}, + }, + "bar": { + "index_type": "shmem", + "shmem_index": { + "used": 512, + }, + "memory": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024.0, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 1024}, + }, + }, + }, + ), + # Test New 7.0 Device/Flash-index usage + create_tc( + ns_stats={ + "1.1.1.1": { + "test": { + "index_used_bytes": 512, + "data_used_bytes": 1024, + "data_total_bytes": 2048, + "data_avail_pct": 50, + }, + }, + "2.2.2.2": { + "bar": { + "index_used_bytes": 512, + "data_used_bytes": 1024, + "data_total_bytes": 2048, + "data_avail_pct": 50, + }, + }, + }, + ns_configs={ + "1.1.1.1": { + "test": { + "storage-engine": "device", + "index-type": "flash", + "index-type.mounts-budget": "1024", + "replication-factor": 1, + }, + }, + "2.2.2.2": { + "bar": { + "storage-engine": "device", + "index-type": "flash", + "index-type.mounts-budget": "1024", + "replication-factor": 1, + }, + }, + }, + expected={ + "CLUSTER": { + "active_features": ["Index-on-flash"], + "ns_count": 2, + "license_data": {"latest": 2048}, + "flash_index": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024, + "avail_pct": 50.0, + }, + "device": { + "total": 4096, + "used": 2048, + "used_pct": 50.0, + "avail": 2048.0, + "avail_pct": 50.0, + }, + }, + "NAMESPACES": { + "test": { + "index_type": "flash", + "flash_index": { + "total": 1024, + "used": 512, + "used_pct": 50.0, + "avail": 512, + "avail_pct": 50.0, + }, + "device": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024.0, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 1024}, + }, + "bar": { + "index_type": "flash", + "flash_index": { + "total": 1024, + "used": 512, + "used_pct": 50.0, + "avail": 512, + "avail_pct": 50.0, + }, + "device": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024.0, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 1024}, + }, + }, + }, + ), + # Test New 7.0 Pmem/Pmem-index usage + create_tc( + ns_stats={ + "1.1.1.1": { + "test": { + "index_used_bytes": 512, + "data_used_bytes": 1024, + "data_total_bytes": 2048, + "data_avail_pct": 50, + }, + }, + "2.2.2.2": { + "bar": { + "index_used_bytes": 512, + "data_used_bytes": 1024, + "data_total_bytes": 2048, + "data_avail_pct": 50, + }, + }, + }, + ns_configs={ + "1.1.1.1": { + "test": { + "storage-engine": "pmem", + "index-type": "pmem", + "index-type.mounts-budget": "1024", + "replication-factor": 1, + }, + }, + "2.2.2.2": { + "bar": { + "storage-engine": "pmem", + "index-type": "pmem", + "index-type.mounts-budget": "1024", + "replication-factor": 1, + }, + }, + }, + expected={ + "CLUSTER": { + "active_features": ["Index-on-pmem"], + "ns_count": 2, + "license_data": {"latest": 2048}, + "pmem_index": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024, + "avail_pct": 50.0, + }, + "pmem": { + "total": 4096, + "used": 2048, + "used_pct": 50.0, + "avail": 2048.0, + "avail_pct": 50.0, + }, + }, + "NAMESPACES": { + "test": { + "index_type": "pmem", + "pmem_index": { + "total": 1024, + "used": 512, + "used_pct": 50.0, + "avail": 512, + "avail_pct": 50.0, + }, + "pmem": { + "total": 
2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024.0, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 1024}, + }, + "bar": { + "index_type": "pmem", + "pmem_index": { + "total": 1024, + "used": 512, + "used_pct": 50.0, + "avail": 512, + "avail_pct": 50.0, + }, + "pmem": { + "total": 2048, + "used": 1024, + "used_pct": 50.0, + "avail": 1024.0, + "avail_pct": 50.0, + }, + "repl_factor": [1], + "license_data": {"latest": 1024}, + }, + }, + }, + ), + # Test Compression Ratio usage + create_tc( + ns_stats={ + "1.1.1.1": { + "test": { + "pmem_compression_ratio": "0.75", + }, + }, + }, + ns_configs={ + "1.1.1.1": { + "test": { + "replication-factor": 1, + }, + }, + }, + expected={ + "CLUSTER": { + "active_features": ["Compression"], + "ns_count": 1, + "license_data": {"latest": 0}, + }, + "NAMESPACES": { + "test": { + "compression_ratio": 0.75, + "repl_factor": [1], + "license_data": {"latest": 0}, + }, + }, + }, + ), + ] + ) + def test_create_summary_namespace_usage_stats( + self, + service_stats, + ns_stats, + xdr_dc_stats, + metadata, + license_allow_unstable, + service_configs, + ns_configs, + security_configs, + license_data_usage, + expected, + ): + actual = common.create_summary( + service_stats, + ns_stats, + xdr_dc_stats, + metadata, + license_allow_unstable, + service_configs, + ns_configs, + security_configs, + license_data_usage, + ) + + self.assertDictEqual(actual, expected) + + @parameterized.expand( + [ + # Test Compression Ratio usage + create_tc( + ns_stats={ + "1.1.1.1": { + "test": { + "data_compression_ratio": "0.73", # Post 7.0 + }, + "bar": { + "device_compression_ratio": "0.74", # Pre 7.0 + }, + "foo": { + "pmem_compression_ratio": "0.75", # Pre 7.0 + }, + }, + }, + ns_configs={ + "1.1.1.1": { + "test": { + "replication-factor": 1, + }, + "bar": { + "replication-factor": 1, + }, + "foo": { + "replication-factor": 1, + }, + }, + }, + expected={ + "CLUSTER": { + "active_features": ["Compression"], + "ns_count": 3, + "license_data": {"latest": 0}, + }, + "NAMESPACES": { + "test": { + "compression_ratio": 0.73, + "repl_factor": [1], + "license_data": {"latest": 0}, + }, + "bar": { + "compression_ratio": 0.74, + "repl_factor": [1], + "license_data": {"latest": 0}, + }, + "foo": { + "compression_ratio": 0.75, + "repl_factor": [1], + "license_data": {"latest": 0}, + }, + }, + }, + ), + ] + ) + def test_create_summary_compression_ratio( + self, + service_stats, + ns_stats, + xdr_dc_stats, + metadata, + license_allow_unstable, + service_configs, + ns_configs, + security_configs, + license_data_usage, + expected, + ): + actual = common.create_summary( + service_stats, + ns_stats, + xdr_dc_stats, + metadata, + license_allow_unstable, + service_configs, + ns_configs, + security_configs, + license_data_usage, + ) + + self.assertDictEqual(actual, expected) diff --git a/test/unit/view/test_view.py b/test/unit/view/test_view.py index 84380bdc..16c6387f 100644 --- a/test/unit/view/test_view.py +++ b/test/unit/view/test_view.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
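+# The tests added below exercise the view.py refactor in this patch:
+# _summary_cluster_list_view and _summary_namespace_list_view now return
+# renderable line objects instead of printing, so each test stringifies the
+# returned lines and diffs them against an expected block of text.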
+import datetime import unittest -from mock import MagicMock, call, create_autospec, patch +from mock import MagicMock, call, patch +from lib.utils.common import SummaryClusterDict, SummaryNamespacesDict -from lib.view import templates +from lib.view import templates, terminal from lib.view.view import CliView from lib.view.sheet.const import SheetStyle from lib.live_cluster.client.node import ASInfoResponseError @@ -174,7 +176,7 @@ def test_show_roster_with_mods(self): self.cluster_mock, flip=True, timestamp="test-stamp", - **{"for": "ba", "with": ["foo"]} + **{"for": "ba", "with": ["foo"]}, ) self.cluster_mock.get_node_names.assert_called_with(["foo"]) @@ -205,7 +207,7 @@ def test_show_best_practices(self): self.cluster_mock, failed_practices, timestamp=timestamp, - **{"with": ["bar"]} + **{"with": ["bar"]}, ) self.cluster_mock.get_node_names.assert_called_with(["bar"]) @@ -245,7 +247,7 @@ def test_show_jobs(self): self.cluster_mock, jobs_data, timestamp=timestamp, - **{"trid": ["1", "3", "5"], "like": ["foo"], "with": ["bar"]} + **{"trid": ["1", "3", "5"], "like": ["foo"], "with": ["bar"]}, ) self.cluster_mock.get_node_names.assert_called_with(["bar"]) @@ -679,3 +681,298 @@ def test_show_users_stats(self): sources, common=common, ) + + def test_summary_cluster_list_view(self): + cluster_data: SummaryClusterDict = { + "active_features": ["Compression"], + "ns_count": 1, + "migrations_in_progress": True, + "cluster_name": ["test-cluster"], + "server_version": ["7.0.0.0"], + "os_version": ["Linux 4.15.0-106-generic"], + "cluster_size": [1], + "device_count": 2, + "device_count_per_node": 2, + "device_count_same_across_nodes": True, + "pmem_index": { + "total": 1, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "flash_index": { + "total": 1, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "shmem_index": { + "total": 1, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "memory_data_and_indexes": { + "total": 1, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "memory": { + "total": 1, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "device": { + "total": 1, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "pmem": { + "total": 1, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "license_data": { + "latest_time": datetime.datetime.fromtimestamp( + 1696451742, tz=datetime.timezone.utc + ), + "latest": 2, + "min": 3, + "max": 4, + "avg": 5, + }, + "active_ns": 2, + "ns_count": 2, + "active_features": ["Compression", "Depression"], + } + expected = f"""Cluster ({terminal.fg_red()}Migrations in Progress{terminal.fg_clear()}) +================================= + + 1. Cluster Name : test-cluster + 2. Server Version : 7.0.0.0 + 3. OS Version : Linux 4.15.0-106-generic + 4. Cluster Size : 1 + 5. Devices : Total 2, per-node 2 + 6. Pmem Index : Total 1.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) + 7. Flash Index : Total 1.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) + 8. Shmem Index : Total 1.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) + 9. Memory : Total 1.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) includes data, pindex, and sindex + 10. Memory : Total 1.000 B, 4.00% used (2.000 B), 5.00% available contiguous space (3.000 B) + 11. Device : Total 1.000 B, 4.00% used (2.000 B), 5.00% available contiguous space (3.000 B) + 12. Pmem : Total 1.000 B, 4.00% used (2.000 B), 5.00% available contiguous space (3.000 B) + 13. 
License Usage : Latest (2023-10-04T20:35:42+00:00): 2.000 B Min: 3.000 B Max: 4.000 B Avg: 5.000 B + 14. Active Namespaces : 2 of 2 + 15. Active Features : Compression, Depression + +""" + + actual = CliView._summary_cluster_list_view(cluster_data) + actual_str = [] + + for line in actual: + lines = str(line).split("\n") + actual_str.extend(lines) + + self.assertListEqual(actual_str, expected.split("\n")) + + def test_summary_namespace_list_view(self): + ns_data: SummaryNamespacesDict = { + "test": { + "devices_total": 2, + "devices_per_node": 2, + "device_count_same_across_nodes": True, + "repl_factor": [1], + "master_objects": 2, + "migrations_in_progress": True, + "rack_aware": True, + "pmem_index": { + "total": 1, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "flash_index": { + "total": 2, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "shmem_index": { + "total": 3, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "memory_data_and_indexes": { + "total": 4, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "memory": { + "total": 5, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "device": { + "total": 6, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "pmem": { + "total": 7, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "license_data": { + "latest_time": datetime.datetime.fromtimestamp( + 1696451742, tz=datetime.timezone.utc + ), + "latest": 2, + "min": 3, + "max": 4, + "avg": 5, + }, + "cache_read_pct": 1, + "rack_aware": True, + "master_objects": 2, + "compression_ratio": 0.5, + }, + "bar": { + "devices_total": 2, + "devices_per_node": 2, + "device_count_same_across_nodes": False, + "repl_factor": [1], + "master_objects": 2, + "migrations_in_progress": False, + "pmem_index": { + "total": 1, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "flash_index": { + "total": 2, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "shmem_index": { + "total": 3, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "memory_data_and_indexes": { + "total": 4, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "memory": { + "total": 5, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "device": { + "total": 6, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "pmem": { + "total": 7, + "used": 2, + "avail": 3, + "used_pct": 4, + "avail_pct": 5, + }, + "license_data": { + "latest_time": datetime.datetime.fromtimestamp( + 1696451742, tz=datetime.timezone.utc + ), + "latest": 2, + "min": 3, + "max": 4, + "avg": 5, + }, + "cache_read_pct": 1, + "rack_aware": False, + "master_objects": 2, + "compression_ratio": 0.5, + }, + } + expected = f"""Namespaces +========== + + {terminal.fg_red()}test{terminal.fg_clear()} + ==== + 1. Devices : Total 2, per-node 2 + 2. Pmem Index : Total 1.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) + 3. Flash Index : Total 2.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) + 4. Shmem Index : Total 3.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) + 5. Memory : Total 4.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) includes data, pindex, and sindex + 6. Memory : Total 5.000 B, 4.00% used (2.000 B), 5.00% available contiguous space (3.000 B) + 7. Device : Total 6.000 B, 4.00% used (2.000 B), 5.00% available contiguous space (3.000 B) + 8. 
Pmem : Total 7.000 B, 4.00% used (2.000 B), 5.00% available contiguous space (3.000 B) + 9. License Usage : Latest (2023-10-04T20:35:42+00:00): 2.000 B Min: 3.000 B Max: 4.000 B Avg: 5.000 B + 10. Replication Factor : 1 + 11. Post-Write-Queue Hit-Rate: 1.000 + 12. Rack-aware : True + 13. Master Objects : 2.000 + 14. Compression-ratio : 0.5 + + {terminal.fg_red()}bar{terminal.fg_clear()} + === + 1. Devices : Total 2, per-node 2 (number differs across nodes) + 2. Pmem Index : Total 1.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) + 3. Flash Index : Total 2.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) + 4. Shmem Index : Total 3.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) + 5. Memory : Total 4.000 B, 4.00% used (2.000 B), 5.00% available (3.000 B) includes data, pindex, and sindex + 6. Memory : Total 5.000 B, 4.00% used (2.000 B), 5.00% available contiguous space (3.000 B) + 7. Device : Total 6.000 B, 4.00% used (2.000 B), 5.00% available contiguous space (3.000 B) + 8. Pmem : Total 7.000 B, 4.00% used (2.000 B), 5.00% available contiguous space (3.000 B) + 9. License Usage : Latest (2023-10-04T20:35:42+00:00): 2.000 B Min: 3.000 B Max: 4.000 B Avg: 5.000 B + 10. Replication Factor : 1 + 11. Post-Write-Queue Hit-Rate: 1.000 + 12. Rack-aware : False + 13. Master Objects : 2.000 + 14. Compression-ratio : 0.5 +""" + + actual = CliView._summary_namespace_list_view(ns_data) + actual_str = [] + + for line in actual: + lines = str(line).split("\n") + actual_str.extend(lines) + + self.assertListEqual(actual_str, expected.split("\n"))
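
For readers checking the expected numbers in the agent-less cases of test_success_with_out_agent and in CreateSummaryTests, the arithmetic the tests encode can be reproduced with a short sketch. The helper below is illustrative only (it is not the implementation in lib/utils/common.py); its rules are read off the test expectations: 35 bytes of per-record overhead before server 6.0 and 39 bytes from 6.0 on, tie-breaker nodes (effective_replication_factor of 0) skipped, and 7.0's data_used_bytes divided by data_compression_ratio to approximate the uncompressed size.

    # Illustrative re-derivation of the license-usage estimate encoded in
    # the tests above; not the real code in lib/utils/common.py.
    def estimate_ns_license_bytes(node_stats: dict, server_builds: dict) -> int:
        total = 0.0
        for node, ns in node_stats.items():
            rf = ns["effective_replication_factor"]
            if rf == 0:
                continue  # tie-breaker nodes hold no unique data
            # 7.0 reports data_used_bytes; older servers report pmem_used_bytes
            # or device_used_bytes depending on the storage engine.
            used = ns.get(
                "data_used_bytes",
                ns.get("pmem_used_bytes", ns.get("device_used_bytes", 0)),
            )
            # Compressed namespaces report on-device size; divide by the ratio
            # to approximate the uncompressed (unique) size.
            used /= float(ns.get("data_compression_ratio", 1.0))
            major = int(server_builds[node].split(".")[0])
            overhead = 39 if major >= 6 else 35
            total += used / rf - overhead * ns.get("master_objects", 0)
        return int(total)

    # Mirrors the mixed-build case above: one 5.0 node and one 6.0 node,
    # i.e. (99000/2 - 35*100) + (99000/2 - 39*100).
    stats = {
        "1.1.1.1": {
            "master_objects": 100,
            "effective_replication_factor": 2,
            "pmem_used_bytes": 99000,
        },
        "2.2.2.2": {
            "master_objects": 100,
            "effective_replication_factor": 2,
            "pmem_used_bytes": 99000,
        },
    }
    builds = {"1.1.1.1": "5.0.0.0", "2.2.2.2": "6.0.0.0"}
    assert estimate_ns_license_bytes(stats, builds) == 91600  # 46000 + 45600

The agent-backed path in test_success_with_agent takes a different route: it keeps only entries whose cluster_stable flag is true (or any info-level entry when allow_unstable is set), reports latest/min/max/avg over what remains, and falls back to the estimate above when nothing qualifies.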