diff --git a/CODE_STYLE.md b/CODE_STYLE.md index 5e5914de46c..254a3f7a12f 100644 --- a/CODE_STYLE.md +++ b/CODE_STYLE.md @@ -109,15 +109,26 @@ These assert will not be part of the release binary and won't hurt the execution **example needed** -## Errors +## Errors and log messages -Error messages should be concise, lowercase (except proper names), and without trailing punctuation. +Error and log messages follow the same format. They should be concise, lowercase (except proper names), and without trailing punctuation. -### Examples +As a loose rule, where it does not hurt readability, log messages should rely on `tracing` +structured logging instead of templating. + +In other words, prefer: +`warn!(remaining=remaining_attempts, "trubulizor rpc plane retry failed")` +to +`warn!("trubulizor rpc plane retry failed ({remaining_attempts} attempts remaining)")` + +### Error Examples - "failed to start actor runtimes" - "cannot join PostgreSQL URI {} with path {:?}" - "could not find split metadata in Metastore {}" -- "unkown output format {:?}" +- "unknown output format {:?}" + +### Log examples + ## Comments diff --git a/quickwit/Makefile b/quickwit/Makefile index 21cc5c4909c..f462b176392 100644 --- a/quickwit/Makefile +++ b/quickwit/Makefile @@ -6,6 +6,8 @@ fmt: @(rustup toolchain list | ( ! grep -q nightly && echo "Toolchain 'nightly' is not installed. Please install using 'rustup toolchain install nightly'.") ) || cargo +nightly fmt @echo "Checking license headers" @bash scripts/check_license_headers.sh + @echo "Checking log format" + @bash scripts/check_log_format.sh fix: @echo "Running cargo clippy --fix" diff --git a/quickwit/quickwit-actors/src/lib.rs b/quickwit/quickwit-actors/src/lib.rs index 1894df9264e..886c775b935 100644 --- a/quickwit/quickwit-actors/src/lib.rs +++ b/quickwit/quickwit-actors/src/lib.rs @@ -60,8 +60,7 @@ pub use observation::{Observation, ObservationType}; use quickwit_common::KillSwitch; pub use spawn_builder::SpawnContext; use thiserror::Error; -use tracing::info; -use tracing::log::warn; +use tracing::{info, warn}; pub use universe::Universe; pub use self::actor_context::ActorContext; @@ -93,19 +92,19 @@ fn heartbeat_from_env_or_default() -> Duration { match std::env::var("QW_ACTOR_HEARTBEAT_SECS") { Ok(actor_hearbeat_secs_str) => { if let Ok(actor_hearbeat_secs) = actor_hearbeat_secs_str.parse::() { - info!("Set the actor heartbeat to {actor_hearbeat_secs} seconds."); + info!("set the actor heartbeat to {actor_hearbeat_secs} seconds"); return Duration::from_secs(actor_hearbeat_secs.get()); } else { warn!( - "Failed to parse `QW_ACTOR_HEARTBEAT_SECS={actor_hearbeat_secs_str}` in \ - seconds > 0, using default heartbeat (30 seconds)." + "failed to parse `QW_ACTOR_HEARTBEAT_SECS={actor_hearbeat_secs_str}` in \ + seconds > 0, using default heartbeat (30 seconds)" ); }; } Err(std::env::VarError::NotUnicode(os_str)) => { warn!( - "Failed to parse `QW_ACTOR_HEARTBEAT_SECS={os_str:?}` in a valid unicode string, \ - using default heartbeat (30 seconds)." 
+ "failed to parse `QW_ACTOR_HEARTBEAT_SECS={os_str:?}` in a valid unicode string, \ + using default heartbeat (30 seconds)" ); } Err(std::env::VarError::NotPresent) => {} diff --git a/quickwit/quickwit-cli/src/jemalloc.rs b/quickwit/quickwit-cli/src/jemalloc.rs index 25600e25522..39a8642ebc3 100644 --- a/quickwit/quickwit-cli/src/jemalloc.rs +++ b/quickwit/quickwit-cli/src/jemalloc.rs @@ -59,7 +59,7 @@ pub async fn jemalloc_metrics_loop() -> tikv_jemalloc_ctl::Result<()> { pub fn start_jemalloc_metrics_loop() { tokio::task::spawn(async { if let Err(jemalloc_metrics_err) = jemalloc_metrics_loop().await { - error!(err=?jemalloc_metrics_err, "Failed to gather metrics from jemalloc."); + error!(err=?jemalloc_metrics_err, "failed to gather metrics from jemalloc"); } }); } diff --git a/quickwit/quickwit-cli/src/lib.rs b/quickwit/quickwit-cli/src/lib.rs index aaad5246615..972cd1d35e5 100644 --- a/quickwit/quickwit-cli/src/lib.rs +++ b/quickwit/quickwit-cli/src/lib.rs @@ -222,7 +222,7 @@ async fn load_node_config(config_uri: &Uri) -> anyhow::Result { let config = NodeConfig::load(config_format, config_content.as_slice()) .await .with_context(|| format!("failed to parse node config `{config_uri}`"))?; - info!(config_uri=%config_uri, config=?config, "Loaded node config."); + info!(config_uri=%config_uri, config=?config, "loaded node config"); Ok(config) } @@ -402,10 +402,10 @@ pub mod busy_detector { let suppressed = SUPPRESSED_DEBUG_COUNT.swap(0, Ordering::Relaxed); if suppressed == 0 { - debug!("Thread wasn't parked for {delta}µs, is the runtime too busy?"); + debug!("thread wasn't parked for {delta}µs, is the runtime too busy?"); } else { debug!( - "Thread wasn't parked for {delta}µs, is the runtime too busy? ({suppressed} \ + "thread wasn't parked for {delta}µs, is the runtime too busy? 
({suppressed} \ similar messages suppressed)" ); } diff --git a/quickwit/quickwit-cli/src/service.rs b/quickwit/quickwit-cli/src/service.rs index 374533633ea..d0991acbc30 100644 --- a/quickwit/quickwit-cli/src/service.rs +++ b/quickwit/quickwit-cli/src/service.rs @@ -81,7 +81,7 @@ impl RunCliCommand { crate::busy_detector::set_enabled(true); if let Some(services) = &self.services { - tracing::info!(services = %services.iter().join(", "), "Setting services from override."); + tracing::info!(services = %services.iter().join(", "), "setting services from override"); node_config.enabled_services = services.clone(); } let telemetry_handle_opt = diff --git a/quickwit/quickwit-cli/src/tool.rs b/quickwit/quickwit-cli/src/tool.rs index ecacae7167b..b3949149658 100644 --- a/quickwit/quickwit-cli/src/tool.rs +++ b/quickwit/quickwit-cli/src/tool.rs @@ -630,12 +630,12 @@ pub async fn merge_cli(args: MergeArgs) -> anyhow::Result<()> { let observation = pipeline_handle.last_observation(); if observation.num_ongoing_merges == 0 { - info!("Merge pipeline has no more ongoing merges, Exiting."); + info!("merge pipeline has no more ongoing merges, exiting"); break; } if pipeline_handle.state().is_exit() { - info!("Merge pipeline has exited, Exiting."); + info!("merge pipeline has exited, exiting"); break; } } diff --git a/quickwit/quickwit-cli/tests/helpers.rs b/quickwit/quickwit-cli/tests/helpers.rs index ed5eeb6415b..2e546ffa8b7 100644 --- a/quickwit/quickwit-cli/tests/helpers.rs +++ b/quickwit/quickwit-cli/tests/helpers.rs @@ -158,7 +158,7 @@ impl TestEnv { }; tokio::spawn(async move { if let Err(error) = run_command.execute().await { - error!(err=?error, "Failed to start a quickwit server."); + error!(err=?error, "failed to start a quickwit server"); } }); wait_for_server_ready(([127, 0, 0, 1], self.rest_listen_port).into()).await?; diff --git a/quickwit/quickwit-cluster/src/member.rs b/quickwit/quickwit-cluster/src/member.rs index 2bdfb085d6d..da35296379b 100644 --- a/quickwit/quickwit-cluster/src/member.rs +++ b/quickwit/quickwit-cluster/src/member.rs @@ -122,7 +122,7 @@ fn parse_indexing_cpu_capacity(node_state: &NodeState) -> CpuCapacity { if let Ok(indexing_capacity) = CpuCapacity::from_str(indexing_capacity_str) { indexing_capacity } else { - error!(indexing_capacity=?indexing_capacity_str, "Received an unparseable indexing capacity from node."); + error!(indexing_capacity=?indexing_capacity_str, "received an unparseable indexing capacity from node"); CpuCapacity::zero() } } diff --git a/quickwit/quickwit-common/src/runtimes.rs b/quickwit/quickwit-common/src/runtimes.rs index 6c80567903c..9f05d80ffb3 100644 --- a/quickwit/quickwit-common/src/runtimes.rs +++ b/quickwit/quickwit-common/src/runtimes.rs @@ -120,7 +120,7 @@ impl RuntimeType { .get_or_init(|| { #[cfg(any(test, feature = "testsuite"))] { - tracing::warn!("Starting Tokio actor runtimes for tests."); + tracing::warn!("starting Tokio actor runtimes for tests"); start_runtimes(RuntimesConfig::light_for_tests()) } #[cfg(not(any(test, feature = "testsuite")))] diff --git a/quickwit/quickwit-common/src/stream_utils.rs b/quickwit/quickwit-common/src/stream_utils.rs index d18813a0886..c3c29b13819 100644 --- a/quickwit/quickwit-common/src/stream_utils.rs +++ b/quickwit/quickwit-common/src/stream_utils.rs @@ -148,7 +148,7 @@ where T: Send + 'static Ok(Some(message)) => Some((message, streaming)), Ok(None) => None, Err(error) => { - warn!(error=?error, "gRPC transport error."); + warn!(error=?error, "gRPC transport error"); None } } diff --git 
a/quickwit/quickwit-config/src/lib.rs b/quickwit/quickwit-config/src/lib.rs index 6f5e0c77cca..e4406ce259c 100644 --- a/quickwit/quickwit-config/src/lib.rs +++ b/quickwit/quickwit-config/src/lib.rs @@ -199,7 +199,7 @@ impl ConfigFormat { serde_json::from_reader(StripComments::new(payload))?; let version_value = json_value.get_mut("version").context("missing version")?; if let Some(version_number) = version_value.as_u64() { - warn!("`version` is supposed to be a string."); + warn!(version_value=?version_value, "`version` is supposed to be a string"); *version_value = JsonValue::String(version_number.to_string()); } serde_json::from_value(json_value).context("failed to read JSON file") @@ -211,7 +211,7 @@ impl ConfigFormat { toml::from_str(payload_str).context("failed to read TOML file")?; let version_value = toml_value.get_mut("version").context("missing version")?; if let Some(version_number) = version_value.as_integer() { - warn!("`version` is supposed to be a string."); + warn!(version_value=?version_value, "`version` is supposed to be a string"); *version_value = toml::Value::String(version_number.to_string()); let reserialized = toml::to_string(version_value) .context("failed to reserialize toml config")?; diff --git a/quickwit/quickwit-config/src/node_config/mod.rs b/quickwit/quickwit-config/src/node_config/mod.rs index c381d396b14..b6412ad6ba0 100644 --- a/quickwit/quickwit-config/src/node_config/mod.rs +++ b/quickwit/quickwit-config/src/node_config/mod.rs @@ -353,7 +353,7 @@ impl NodeConfig { for peer_seed in &self.peer_seeds { let peer_seed_addr = HostAddr::parse_with_default_port(peer_seed, default_gossip_port)?; if let Err(error) = peer_seed_addr.resolve().await { - warn!(peer_seed = %peer_seed_addr, error = ?error, "Failed to resolve peer seed address."); + warn!(peer_seed = %peer_seed_addr, error = ?error, "failed to resolve peer seed address"); continue; } peer_seed_addrs.push(peer_seed_addr.to_string()) diff --git a/quickwit/quickwit-config/src/node_config/serialize.rs b/quickwit/quickwit-config/src/node_config/serialize.rs index 9a99d0fb60e..4f2d259222b 100644 --- a/quickwit/quickwit-config/src/node_config/serialize.rs +++ b/quickwit/quickwit-config/src/node_config/serialize.rs @@ -52,7 +52,7 @@ fn default_node_id() -> ConfigValue { Ok(short_hostname) => short_hostname, Err(error) => { let node_id = new_coolid("node"); - warn!(error=?error, "Failed to determine hostname or hostname was invalid, falling back to random node ID `{}`.", node_id); + warn!(error=?error, "failed to determine hostname or hostname was invalid, falling back to random node ID `{}`", node_id); node_id } }; @@ -100,12 +100,12 @@ fn default_data_dir_uri() -> ConfigValue { fn default_advertise_host(listen_ip: &IpAddr) -> anyhow::Result { if listen_ip.is_unspecified() { if let Some((interface_name, private_ip)) = find_private_ip() { - info!(advertise_address=%private_ip, interface_name=%interface_name, "Using sniffed advertise address."); + info!(advertise_address=%private_ip, interface_name=%interface_name, "using sniffed advertise address"); return Ok(Host::from(private_ip)); } bail!("listen address `{listen_ip}` is unspecified and advertise address is not set"); } - info!(advertise_address=%listen_ip, "Using listen address as advertise address."); + info!(advertise_address=%listen_ip, "using listen address as advertise address"); Ok(Host::from(*listen_ip)) } @@ -301,12 +301,12 @@ fn validate(node_config: &NodeConfig) -> anyhow::Result<()> { if node_config.cluster_id == DEFAULT_CLUSTER_ID { warn!( - 
"Cluster ID is not set, falling back to default value: `{}`.", + "cluster ID is not set, falling back to default value: `{}`", DEFAULT_CLUSTER_ID ); } if node_config.peer_seeds.is_empty() { - warn!("Peer seed list is empty."); + warn!("peer seed list is empty"); } Ok(()) } diff --git a/quickwit/quickwit-control-plane/src/indexing_scheduler/mod.rs b/quickwit/quickwit-control-plane/src/indexing_scheduler/mod.rs index 8f4020d81cf..cd46c889509 100644 --- a/quickwit/quickwit-control-plane/src/indexing_scheduler/mod.rs +++ b/quickwit/quickwit-control-plane/src/indexing_scheduler/mod.rs @@ -187,7 +187,7 @@ impl IndexingScheduler { crate::metrics::CONTROL_PLANE_METRICS.schedule_total.inc(); let mut indexers: Vec<(String, IndexerNodeInfo)> = self.get_indexers_from_indexer_pool(); if indexers.is_empty() { - warn!("No indexer available, cannot schedule an indexing plan."); + warn!("no indexer available, cannot schedule an indexing plan"); return; }; @@ -254,11 +254,11 @@ impl IndexingScheduler { last_applied_plan.indexing_tasks_per_indexer(), ); if !indexing_plans_diff.has_same_nodes() { - info!(plans_diff=?indexing_plans_diff, "Running plan and last applied plan node IDs differ: schedule an indexing plan."); + info!(plans_diff=?indexing_plans_diff, "running plan and last applied plan node IDs differ: schedule an indexing plan"); self.schedule_indexing_plan_if_needed(model); } else if !indexing_plans_diff.has_same_tasks() { // Some nodes may have not received their tasks, apply it again. - info!(plans_diff=?indexing_plans_diff, "Running tasks and last applied tasks differ: reapply last plan."); + info!(plans_diff=?indexing_plans_diff, "running tasks and last applied tasks differ: reapply last plan"); self.apply_physical_indexing_plan(&mut indexers, last_applied_plan.clone()); } } @@ -272,7 +272,7 @@ impl IndexingScheduler { indexers: &mut [(String, IndexerNodeInfo)], new_physical_plan: PhysicalIndexingPlan, ) { - debug!("Apply physical indexing plan: {:?}", new_physical_plan); + debug!(new_physical_plan=?new_physical_plan, "apply physical indexing plan"); for (node_id, indexing_tasks) in new_physical_plan.indexing_tasks_per_indexer() { // We don't want to block on a slow indexer so we apply this change asynchronously // TODO not blocking is cool, but we need to make sure there is not accumulation @@ -292,7 +292,7 @@ impl IndexingScheduler { .apply_indexing_plan(ApplyIndexingPlanRequest { indexing_tasks }) .await { - error!(indexer_node_id=%indexer.0, err=?error, "Error occurred when applying indexing plan to indexer."); + error!(indexer_node_id=%indexer.0, err=?error, "error occurred when applying indexing plan to indexer"); } } }); diff --git a/quickwit/quickwit-control-plane/src/indexing_scheduler/scheduling/mod.rs b/quickwit/quickwit-control-plane/src/indexing_scheduler/scheduling/mod.rs index 82c6a557249..63f12a87817 100644 --- a/quickwit/quickwit-control-plane/src/indexing_scheduler/scheduling/mod.rs +++ b/quickwit/quickwit-control-plane/src/indexing_scheduler/scheduling/mod.rs @@ -508,7 +508,7 @@ pub fn build_physical_indexing_plan( // TODO this is probably a bad idea to just not overschedule, as having a single index trail // behind will prevent the log GC. // A better strategy would probably be to close shard, and start prevent ingestion. - error!("unable to assign all sources in the cluster."); + error!("unable to assign all sources in the cluster"); } // Convert the new scheduling solution back to a physical plan. 
diff --git a/quickwit/quickwit-index-management/src/garbage_collection.rs b/quickwit/quickwit-index-management/src/garbage_collection.rs index f0062ff8921..9246fda84fd 100644 --- a/quickwit/quickwit-index-management/src/garbage_collection.rs +++ b/quickwit/quickwit-index-management/src/garbage_collection.rs @@ -202,7 +202,7 @@ async fn delete_splits_marked_for_deletion( .map(|split| split.split_metadata) .collect(), Err(error) => { - error!(error = ?error, "Failed to fetch deletable splits."); + error!(error = ?error, "failed to fetch deletable splits"); break; } }; diff --git a/quickwit/quickwit-indexing/src/actors/indexing_pipeline.rs b/quickwit/quickwit-indexing/src/actors/indexing_pipeline.rs index 2ee55edc844..886e75dc3ac 100644 --- a/quickwit/quickwit-indexing/src/actors/indexing_pipeline.rs +++ b/quickwit/quickwit-indexing/src/actors/indexing_pipeline.rs @@ -519,11 +519,11 @@ impl Handler for IndexingPipeline { if let Some(MetastoreError::NotFound { .. }) = spawn_error.downcast_ref::() { - info!(error = ?spawn_error, "Could not spawn pipeline, index might have been deleted."); + info!(error = ?spawn_error, "could not spawn pipeline, index might have been deleted"); return Err(ActorExitStatus::Success); } let retry_delay = wait_duration_before_retry(spawn.retry_count + 1); - error!(error = ?spawn_error, retry_count = spawn.retry_count, retry_delay = ?retry_delay, "Error while spawning indexing pipeline, retrying after some time."); + error!(error = ?spawn_error, retry_count = spawn.retry_count, retry_delay = ?retry_delay, "error while spawning indexing pipeline, retrying after some time"); ctx.schedule_self_msg( retry_delay, Spawn { diff --git a/quickwit/quickwit-indexing/src/actors/indexing_service.rs b/quickwit/quickwit-indexing/src/actors/indexing_service.rs index 1204339845e..65998f55c80 100644 --- a/quickwit/quickwit-indexing/src/actors/indexing_service.rs +++ b/quickwit/quickwit-indexing/src/actors/indexing_service.rs @@ -579,11 +579,11 @@ impl IndexingService { ) .await { - error!(pipeline_id=?new_pipeline_id, err=?error, "Failed to spawn pipeline."); + error!(pipeline_id=?new_pipeline_id, err=?error, "failed to spawn pipeline"); failed_spawning_pipeline_ids.push(new_pipeline_id.clone()); } } else { - error!(pipeline_id=?new_pipeline_id, "Failed to spawn pipeline: source does not exist."); + error!(pipeline_id=?new_pipeline_id, "failed to spawn pipeline: source does not exist"); failed_spawning_pipeline_ids.push(new_pipeline_id.clone()); } } else { @@ -674,7 +674,7 @@ impl IndexingService { .queues .into_iter() .collect(); - debug!(queues=?queues, "List ingest API queues."); + debug!(queues=?queues, "list ingest API queues"); let indexes_metadatas = self .metastore @@ -686,7 +686,7 @@ impl IndexingService { .into_iter() .map(|index_metadata| index_metadata.index_id().to_string()) .collect(); - debug!(index_ids=?index_ids, "List indexes."); + debug!(index_ids=?index_ids, "list indexes"); let queue_ids_to_delete = queues.difference(&index_ids); diff --git a/quickwit/quickwit-indexing/src/actors/merge_pipeline.rs b/quickwit/quickwit-indexing/src/actors/merge_pipeline.rs index 0ca0b00de78..be4e816871e 100644 --- a/quickwit/quickwit-indexing/src/actors/merge_pipeline.rs +++ b/quickwit/quickwit-indexing/src/actors/merge_pipeline.rs @@ -448,11 +448,11 @@ impl Handler for MergePipeline { if let Some(MetastoreError::NotFound { .. 
}) = spawn_error.downcast_ref::() { - info!(error = ?spawn_error, "Could not spawn pipeline, index might have been deleted."); + info!(error = ?spawn_error, "could not spawn pipeline, index might have been deleted"); return Err(ActorExitStatus::Success); } let retry_delay = wait_duration_before_retry(spawn.retry_count); - error!(error = ?spawn_error, retry_count = spawn.retry_count, retry_delay = ?retry_delay, "Error while spawning indexing pipeline, retrying after some time."); + error!(error = ?spawn_error, retry_count = spawn.retry_count, retry_delay = ?retry_delay, "error while spawning indexing pipeline, retrying after some time"); ctx.schedule_self_msg( retry_delay, Spawn { diff --git a/quickwit/quickwit-indexing/src/actors/merge_planner.rs b/quickwit/quickwit-indexing/src/actors/merge_planner.rs index 50157d9fc36..43f6fe28123 100644 --- a/quickwit/quickwit-indexing/src/actors/merge_planner.rs +++ b/quickwit/quickwit-indexing/src/actors/merge_planner.rs @@ -358,7 +358,7 @@ impl MergePlanner { // We run smaller merges in priority. merge_ops.sort_by_cached_key(|merge_op| Reverse(max_merge_ops(merge_op))); while let Some(merge_operation) = merge_ops.pop() { - info!(merge_operation=?merge_operation, "Planned merge operation."); + info!(merge_operation=?merge_operation, "planned merge operation"); let tracked_merge_operation = self .ongoing_merge_operations_inventory .track(merge_operation); diff --git a/quickwit/quickwit-indexing/src/actors/packager.rs b/quickwit/quickwit-indexing/src/actors/packager.rs index 26639e2d6bf..24a66e5fde7 100644 --- a/quickwit/quickwit-indexing/src/actors/packager.rs +++ b/quickwit/quickwit-indexing/src/actors/packager.rs @@ -301,7 +301,7 @@ fn create_packaged_split( append_to_tag_set(&named_field.name, &terms, &mut tags); } Err(tag_extraction_error) => { - warn!(err=?tag_extraction_error, "No field values will be registered in the split metadata."); + warn!(err=?tag_extraction_error, "no field values will be registered in the split metadata"); } } } diff --git a/quickwit/quickwit-indexing/src/actors/uploader.rs b/quickwit/quickwit-indexing/src/actors/uploader.rs index bb875be91f4..e70653bce4e 100644 --- a/quickwit/quickwit-indexing/src/actors/uploader.rs +++ b/quickwit/quickwit-indexing/src/actors/uploader.rs @@ -290,7 +290,7 @@ impl Handler for Uploader { let kill_switch = ctx.kill_switch().clone(); let split_ids = batch.split_ids(); if kill_switch.is_dead() { - warn!(split_ids=?split_ids,"Kill switch was activated. Cancelling upload."); + warn!(split_ids=?split_ids,"kill switch was activated, cancelling upload"); return Err(ActorExitStatus::Killed); } let metastore = self.metastore.clone(); @@ -311,7 +311,7 @@ impl Handler for Uploader { for packaged_split in batch.splits.iter() { if batch.publish_lock.is_dead() { // TODO: Remove the junk right away? - info!("Splits' publish lock is dead."); + info!("splits' publish lock is dead"); split_update_sender.discard()?; return Ok(()); } diff --git a/quickwit/quickwit-indexing/src/lib.rs b/quickwit/quickwit-indexing/src/lib.rs index ba0169b3d5e..47ef1d0e188 100644 --- a/quickwit/quickwit-indexing/src/lib.rs +++ b/quickwit/quickwit-indexing/src/lib.rs @@ -75,7 +75,7 @@ pub async fn start_indexing_service( storage_resolver: StorageResolver, event_broker: EventBroker, ) -> anyhow::Result> { - info!("Starting indexer service."); + info!("starting indexer service"); // Spawn indexing service. 
let indexing_service = IndexingService::new( diff --git a/quickwit/quickwit-indexing/src/source/gcp_pubsub_source.rs b/quickwit/quickwit-indexing/src/source/gcp_pubsub_source.rs index a22e5e2c454..9dc8d5d79b3 100644 --- a/quickwit/quickwit-indexing/src/source/gcp_pubsub_source.rs +++ b/quickwit/quickwit-indexing/src/source/gcp_pubsub_source.rs @@ -175,7 +175,7 @@ impl Source for GcpPubSubSource { tokio::select! { resp = self.pull_message_batch(&mut batch) => { if let Err(err) = resp { - warn!("Failed to pull messages from subscription `{}`: {:?}", self.subscription_name, err); + warn!("failed to pull messages from subscription `{}`: {:?}", self.subscription_name, err); } if batch.num_bytes >= BATCH_NUM_BYTES_LIMIT { break; @@ -196,7 +196,7 @@ impl Source for GcpPubSubSource { // TODO: need to wait for all the id to be ack for at_least_once if self.should_exit() { - info!(subscription=%self.subscription_name, "Reached end of subscription."); + info!(subscription=%self.subscription_name, "reached end of subscription"); ctx.send_exit_with_success(doc_processor_mailbox).await?; return Err(ActorExitStatus::Success); } diff --git a/quickwit/quickwit-indexing/src/source/kafka_source.rs b/quickwit/quickwit-indexing/src/source/kafka_source.rs index 52c97722a47..3bc1259289a 100644 --- a/quickwit/quickwit-indexing/src/source/kafka_source.rs +++ b/quickwit/quickwit-indexing/src/source/kafka_source.rs @@ -510,7 +510,7 @@ impl Source for KafkaSource { ctx.send_message(doc_processor_mailbox, message).await?; } if self.should_exit() { - info!(topic = %self.topic, "Reached end of topic."); + info!(topic = %self.topic, "reached end of topic"); ctx.send_exit_with_success(doc_processor_mailbox).await?; return Err(ActorExitStatus::Success); } @@ -624,11 +624,11 @@ fn spawn_consumer_poll_loop( .expect("The offset should be valid."); } if let Err(error) = consumer.commit(&tpl, CommitMode::Async) { - warn!(error=?error, "Failed to commit offsets."); + warn!(error=?error, "failed to commit offsets"); } } } - debug!("Exiting consumer poll loop."); + debug!("exiting consumer poll loop"); consumer.unsubscribe(); }) } diff --git a/quickwit/quickwit-indexing/src/source/kinesis/kinesis_source.rs b/quickwit/quickwit-indexing/src/source/kinesis/kinesis_source.rs index 5df7892a39f..4064ccc4407 100644 --- a/quickwit/quickwit-indexing/src/source/kinesis/kinesis_source.rs +++ b/quickwit/quickwit-indexing/src/source/kinesis/kinesis_source.rs @@ -319,7 +319,7 @@ impl Source for KinesisSource { ctx.send_message(indexer_mailbox, batch).await?; } if self.state.shard_consumers.is_empty() { - info!(stream_name = %self.stream_name, "Reached end of stream."); + info!(stream_name = %self.stream_name, "reached end of stream"); ctx.send_exit_with_success(indexer_mailbox).await?; return Err(ActorExitStatus::Success); } diff --git a/quickwit/quickwit-indexing/src/source/pulsar_source.rs b/quickwit/quickwit-indexing/src/source/pulsar_source.rs index 23063c554ff..cb10092c499 100644 --- a/quickwit/quickwit-indexing/src/source/pulsar_source.rs +++ b/quickwit/quickwit-indexing/src/source/pulsar_source.rs @@ -155,7 +155,7 @@ impl PulsarSource { batch: &mut BatchBuilder, ) -> anyhow::Result<()> { if doc.is_empty() { - warn!("Message received from queue was empty."); + warn!("message received from queue was empty"); self.state.num_invalid_messages += 1; return Ok(()); } @@ -192,7 +192,7 @@ impl PulsarSource { } async fn try_ack_messages(&mut self, checkpoint: SourceCheckpoint) -> anyhow::Result<()> { - debug!(ckpt = ?checkpoint, "Truncating 
message queue."); + debug!(ckpt = ?checkpoint, "truncating message queue"); for (partition, position) in checkpoint.iter() { if let Some(msg_id) = msg_id_from_position(&position) { self.pulsar_consumer @@ -311,7 +311,7 @@ async fn create_pulsar_consumer( .into_iter() .map(|id| id.to_string()) .collect::>(); - info!(positions = ?current_positions, "Seeking to last checkpoint positions."); + info!(positions = ?current_positions, "seeking to last checkpoint positions"); for (_, position) in current_positions { let seek_to = msg_id_from_position(&position); diff --git a/quickwit/quickwit-ingest/src/ingest_api_service.rs b/quickwit/quickwit-ingest/src/ingest_api_service.rs index 73946f17bee..1d19b59927a 100644 --- a/quickwit/quickwit-ingest/src/ingest_api_service.rs +++ b/quickwit/quickwit-ingest/src/ingest_api_service.rs @@ -160,7 +160,7 @@ impl IngestApiService { let disk_usage = self.queues.disk_usage(); if disk_usage > self.disk_limit { - info!("Ingestion rejected due to disk limit"); + info!("ingestion rejected due to disk limit"); return Err(IngestServiceError::RateLimited); } @@ -169,7 +169,7 @@ impl IngestApiService { .reserve_capacity(request.cost() as usize) .is_err() { - info!("Ingest request rejected due to memory limit."); + info!("ingest request rejected due to memory limit"); return Err(IngestServiceError::RateLimited); } let mut num_docs = 0usize; diff --git a/quickwit/quickwit-integration-tests/src/test_utils/cluster_sandbox.rs b/quickwit/quickwit-integration-tests/src/test_utils/cluster_sandbox.rs index 2d832c9d0b3..c35dfb66996 100644 --- a/quickwit/quickwit-integration-tests/src/test_utils/cluster_sandbox.rs +++ b/quickwit/quickwit-integration-tests/src/test_utils/cluster_sandbox.rs @@ -124,7 +124,7 @@ pub(crate) async fn ingest_with_retry( .ingest(index_id, ingest_source_clone, None, None, commit_type_clone) .await { - debug!("Failed to index into {} due to error: {}", index_id, err); + debug!(index=%index_id, err=%err, "failed to ingest"); false } else { true diff --git a/quickwit/quickwit-jaeger/src/lib.rs b/quickwit/quickwit-jaeger/src/lib.rs index 925c5cba331..4cd82260018 100644 --- a/quickwit/quickwit-jaeger/src/lib.rs +++ b/quickwit/quickwit-jaeger/src/lib.rs @@ -267,11 +267,11 @@ impl JaegerService { let search_response = self.search_service.root_search(search_request).await?; let Some(agg_result_json) = search_response.aggregation else { - debug!("The query matched no traces."); + debug!("the query matched no traces"); return Ok((Vec::new(), 0..=0)); }; let trace_ids = collect_trace_ids(&agg_result_json)?; - debug!("The query matched {} traces.", trace_ids.0.len()); + debug!("the query matched {} traces.", trace_ids.0.len()); Ok(trace_ids) } @@ -315,7 +315,7 @@ impl JaegerService { let search_response = match self.search_service.root_search(search_request).await { Ok(search_response) => search_response, Err(search_error) => { - error!("Failed to fetch spans: {search_error:?}"); + error!(search_error=?search_error, "failed to fetch spans"); record_error(operation_name, request_start); return Err(Status::internal("Failed to fetch spans.")); } @@ -365,7 +365,7 @@ impl JaegerService { let chunk = mem::replace(&mut chunk, Vec::with_capacity(chunk_len)); if let Err(send_error) = tx.send(Ok(SpansResponseChunk { spans: chunk })).await { - debug!("Client disconnected: {send_error:?}"); + debug!(send_error=?send_error, "client disconnected"); return; } record_send(operation_name, num_spans, chunk_num_bytes); @@ -380,7 +380,7 @@ impl JaegerService { num_bytes_total += 
chunk_num_bytes; if let Err(send_error) = tx.send(Ok(SpansResponseChunk { spans: chunk })).await { - debug!("Client disconnected: {send_error:?}"); + debug!(error=?send_error, "client disconnected"); return; } record_send(operation_name, num_spans, chunk_num_bytes); @@ -809,7 +809,7 @@ fn inject_span_kind_tag(tags: &mut Vec, span_kind_id: u32) { 4 => "producer", 5 => "consumer", _ => { - warn!("Unknown span kind ID: `{span_kind_id}`."); + warn!(span_kind_id=%span_kind_id, "unknown span kind ID"); return; } }; @@ -1017,7 +1017,7 @@ where T: Deserialize<'a> { match serde_json::from_str(json) { Ok(deserialized) => Ok(deserialized), Err(error) => { - error!("Failed to deserialize {label}: {error:?}"); + error!("failed to deserialize {label}: {error:?}"); Err(Status::internal(format!( "Failed to deserialize {label}: {error:?}." ))) diff --git a/quickwit/quickwit-janitor/src/actors/delete_task_planner.rs b/quickwit/quickwit-janitor/src/actors/delete_task_planner.rs index d51e8274859..75d40b59ad8 100644 --- a/quickwit/quickwit-janitor/src/actors/delete_task_planner.rs +++ b/quickwit/quickwit-janitor/src/actors/delete_task_planner.rs @@ -196,7 +196,7 @@ impl DeleteTaskPlanner { let delete_operation = MergeOperation::new_delete_and_merge_operation( split_with_deletes.split_metadata, ); - info!(delete_operation=?delete_operation, "Planned delete operation."); + info!(delete_operation=?delete_operation, "planned delete operation"); let tracked_delete_operation = self .ongoing_delete_operations_inventory .track(delete_operation); diff --git a/quickwit/quickwit-janitor/src/actors/delete_task_service.rs b/quickwit/quickwit-janitor/src/actors/delete_task_service.rs index fb2800e6b97..9145fbf5df1 100644 --- a/quickwit/quickwit-janitor/src/actors/delete_task_service.rs +++ b/quickwit/quickwit-janitor/src/actors/delete_task_service.rs @@ -203,7 +203,7 @@ impl Handler for DeleteTaskService { ) -> Result<(), ActorExitStatus> { let result = self.update_pipeline_handles(ctx).await; if let Err(error) = result { - error!("Delete task pipelines update failed: {}", error); + error!(error=%error, "delete task pipelines update failed"); } ctx.schedule_self_msg(UPDATE_PIPELINES_INTERVAL, UpdatePipelines) .await; diff --git a/quickwit/quickwit-janitor/src/actors/garbage_collector.rs b/quickwit/quickwit-janitor/src/actors/garbage_collector.rs index 5e80b11d179..4ddd2c364d4 100644 --- a/quickwit/quickwit-janitor/src/actors/garbage_collector.rs +++ b/quickwit/quickwit-janitor/src/actors/garbage_collector.rs @@ -96,11 +96,11 @@ impl GarbageCollector { }) { Ok(metadatas) => metadatas, Err(error) => { - error!(error=?error, "Failed to list indexes from the metastore."); + error!(error=?error, "failed to list indexes from the metastore"); return; } }; - info!(index_ids=%indexes.iter().map(|im| im.index_id()).join(", "), "Garbage collecting indexes."); + info!(index_ids=%indexes.iter().map(|im| im.index_id()).join(", "), "garbage collecting indexes"); let mut gc_futures = stream::iter(indexes).map(|index| { let metastore = self.metastore.clone(); @@ -110,7 +110,7 @@ impl GarbageCollector { let storage = match storage_resolver.resolve(index_uri).await { Ok(storage) => storage, Err(error) => { - error!(index=%index.index_id(), error=?error, "Failed to resolve the index storage Uri."); + error!(index=%index.index_id(), error=?error, "failed to resolve the index storage Uri"); return None; } }; @@ -140,7 +140,7 @@ impl GarbageCollector { } Err(error) => { self.counters.num_failed_gc_run_on_index += 1; - 
error!(index_id=%index_uid.index_id(), error=?error, "Failed to run garbage collection on index."); + error!(index_id=%index_uid.index_id(), error=?error, "failed to run garbage collection on index"); continue; } }; diff --git a/quickwit/quickwit-janitor/src/actors/retention_policy_executor.rs b/quickwit/quickwit-janitor/src/actors/retention_policy_executor.rs index f8122a0b030..41ec217606e 100644 --- a/quickwit/quickwit-janitor/src/actors/retention_policy_executor.rs +++ b/quickwit/quickwit-janitor/src/actors/retention_policy_executor.rs @@ -91,11 +91,11 @@ impl RetentionPolicyExecutor { { Ok(metadatas) => metadatas, Err(error) => { - error!(error=?error, "Failed to list indexes from the metastore."); + error!(error=?error, "failed to list indexes from the metastore"); return; } }; - debug!(index_ids=%index_metadatas.iter().map(|im| im.index_id()).join(", "), "Retention policy refresh."); + debug!(index_ids=%index_metadatas.iter().map(|im| im.index_id()).join(", "), "retention policy refresh"); let deleted_indexes = compute_deleted_indexes( self.index_configs.keys().map(String::as_str), @@ -104,7 +104,7 @@ impl RetentionPolicyExecutor { .map(|index_metadata| index_metadata.index_id()), ); if !deleted_indexes.is_empty() { - debug!(index_ids=%deleted_indexes.iter().join(", "), "Deleting indexes from cache."); + debug!(index_ids=%deleted_indexes.iter().join(", "), "deleting indexes from cache"); for index_id in deleted_indexes { self.index_configs.remove(&index_id); } @@ -197,7 +197,7 @@ impl Handler for RetentionPolicyExecutor { let index_config = match self.index_configs.get(message.index_uid.index_id()) { Some(config) => config, None => { - debug!(index_id=%message.index_uid.index_id(), "The index might have been deleted."); + debug!(index_id=%message.index_uid.index_id(), "the index might have been deleted"); return Ok(()); } }; @@ -229,7 +229,7 @@ impl Handler for RetentionPolicyExecutor { // we remove it from the cache for it to be retried next time it gets // added back by the RetentionPolicyExecutor cache refresh loop. 
self.index_configs.remove(message.index_uid.index_id()); - error!(index_id=%message.index_uid.index_id(), "Couldn't extract the index next schedule interval."); + error!(index_id=%message.index_uid.index_id(), "couldn't extract the index next schedule interval"); } Ok(()) } diff --git a/quickwit/quickwit-janitor/src/lib.rs b/quickwit/quickwit-janitor/src/lib.rs index 84b4a3cf21c..9b888cff4b3 100644 --- a/quickwit/quickwit-janitor/src/lib.rs +++ b/quickwit/quickwit-janitor/src/lib.rs @@ -51,7 +51,7 @@ pub async fn start_janitor_service( storage_resolver: StorageResolver, event_broker: EventBroker, ) -> anyhow::Result> { - info!("Starting janitor service."); + info!("starting janitor service"); let garbage_collector = GarbageCollector::new(metastore.clone(), storage_resolver.clone()); let (_, garbage_collector_handle) = universe.spawn_builder().spawn(garbage_collector); diff --git a/quickwit/quickwit-metastore/src/checkpoint.rs b/quickwit/quickwit-metastore/src/checkpoint.rs index e8928a7efcc..594965df01b 100644 --- a/quickwit/quickwit-metastore/src/checkpoint.rs +++ b/quickwit/quickwit-metastore/src/checkpoint.rs @@ -292,7 +292,7 @@ impl SourceCheckpoint { match position.cmp(&delta_position.from) { Ordering::Equal => {} Ordering::Less => { - warn!(cur_pos=?position, delta_pos_from=?delta_position.from,partition=?delta_partition, "Some positions were skipped."); + warn!(cur_pos=?position, delta_pos_from=?delta_position.from,partition=?delta_partition, "some positions were skipped"); } Ordering::Greater => { return Err(IncompatibleCheckpointDelta { diff --git a/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/file_backed_index/mod.rs b/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/file_backed_index/mod.rs index 29edd6f1ecd..440765aa21d 100644 --- a/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/file_backed_index/mod.rs +++ b/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/file_backed_index/mod.rs @@ -456,13 +456,13 @@ impl FileBackedIndex { let message = "splits are not deletable".to_string(); return Err(MetastoreError::FailedPrecondition { entity, message }); } - info!(index_id=%self.index_id(), "Deleted {num_deleted_splits} splits from index."); + info!(index_id=%self.index_id(), "deleted {num_deleted_splits} splits from index"); if !split_not_found_ids.is_empty() { warn!( index_id=self.index_id().to_string(), split_ids=?PrettySample::new(&split_not_found_ids, 5), - "{} splits were not found and could not be deleted.", + "{} splits were not found and could not be deleted", split_not_found_ids.len() ); } diff --git a/quickwit/quickwit-metastore/src/metastore/postgresql_metastore.rs b/quickwit/quickwit-metastore/src/metastore/postgresql_metastore.rs index e6dee95f359..687befda13b 100644 --- a/quickwit/quickwit-metastore/src/metastore/postgresql_metastore.rs +++ b/quickwit/quickwit-metastore/src/metastore/postgresql_metastore.rs @@ -391,7 +391,7 @@ fn convert_sqlx_err(index_id: &str, sqlx_err: sqlx::Error) -> MetastoreError { } } _ => { - error!(err=?sqlx_err, "An error has occurred in the database operation."); + error!(err=?sqlx_err, "an error has occurred in the database operation"); MetastoreError::Db { message: sqlx_err.to_string(), } @@ -644,7 +644,7 @@ impl MetastoreService for PostgresqlMetastore { return Err(MetastoreError::FailedPrecondition { entity, message }); } - debug!(index_id=%index_uid.index_id(), num_splits=split_ids.len(), "Splits successfully staged."); + debug!(index_id=%index_uid.index_id(), 
num_splits=split_ids.len(), "splits successfully staged"); Ok(EmptyResponse {}) }) diff --git a/quickwit/quickwit-metastore/src/metastore/postgresql_model.rs b/quickwit/quickwit-metastore/src/metastore/postgresql_model.rs index be21704f920..a96be670726 100644 --- a/quickwit/quickwit-metastore/src/metastore/postgresql_model.rs +++ b/quickwit/quickwit-metastore/src/metastore/postgresql_model.rs @@ -48,7 +48,7 @@ impl PgIndex { pub fn index_metadata(&self) -> MetastoreResult { let mut index_metadata = serde_json::from_str::(&self.index_metadata_json) .map_err(|error| { - error!(index_id=%self.index_id, error=?error, "Failed to deserialize index metadata."); + error!(index_id=%self.index_id, error=?error, "failed to deserialize index metadata"); MetastoreError::JsonDeserializeError { struct_name: "IndexMetadata".to_string(), @@ -125,7 +125,7 @@ impl PgSplit { /// Deserializes and returns the split's metadata. fn split_metadata(&self) -> MetastoreResult { serde_json::from_str::(&self.split_metadata_json).map_err(|error| { - error!(index_id=%self.index_uid.index_id(), split_id=%self.split_id, error=?error, "Failed to deserialize split metadata."); + error!(index_id=%self.index_uid.index_id(), split_id=%self.split_id, error=?error, "failed to deserialize split metadata"); MetastoreError::JsonDeserializeError { struct_name: "SplitMetadata".to_string(), @@ -137,8 +137,7 @@ impl PgSplit { /// Deserializes and returns the split's state. fn split_state(&self) -> MetastoreResult { SplitState::from_str(&self.split_state).map_err(|error| { - error!(index_id=%self.index_uid.index_id(), split_id=%self.split_id, split_state=?self.split_state, error=?error, "Failed to deserialize split state."); - + error!(index_id=%self.index_uid.index_id(), split_id=%self.split_id, split_state=?self.split_state, error=?error, "failed to deserialize split state"); MetastoreError::JsonDeserializeError { struct_name: "SplitState".to_string(), message: error, @@ -189,7 +188,7 @@ impl PgDeleteTask { /// Deserializes and returns the split's metadata. 
fn delete_query(&self) -> MetastoreResult { serde_json::from_str::(&self.delete_query_json).map_err(|error| { - error!(index_id=%self.index_uid.index_id(), opstamp=%self.opstamp, error=?error, "Failed to deserialize delete query."); + error!(index_id=%self.index_uid.index_id(), opstamp=%self.opstamp, error=?error, "failed to deserialize delete query"); MetastoreError::JsonDeserializeError { struct_name: "DeleteQuery".to_string(), diff --git a/quickwit/quickwit-metastore/src/tests/mod.rs b/quickwit/quickwit-metastore/src/tests/mod.rs index f97122cb962..ad641af731e 100644 --- a/quickwit/quickwit-metastore/src/tests/mod.rs +++ b/quickwit/quickwit-metastore/src/tests/mod.rs @@ -3458,7 +3458,7 @@ pub async fn test_metastore_list_stale_splits< )); { - info!("List stale splits on an index"); + info!("list stale splits on an index"); let create_index_request = CreateIndexRequest::try_from_index_config(index_config.clone()).unwrap(); let index_uid: IndexUid = metastore @@ -3615,7 +3615,7 @@ pub async fn test_metastore_update_splits_delete_opstamp< }; { - info!("Update splits delete opstamp on a non-existent index."); + info!("update splits delete opstamp on a non-existent index"); let update_splits_delete_opstamp_request = UpdateSplitsDeleteOpstampRequest { index_uid: IndexUid::new_with_random_ulid("index-not-found").to_string(), split_ids: vec![split_id_1.clone()], @@ -3633,7 +3633,7 @@ pub async fn test_metastore_update_splits_delete_opstamp< } { - info!("Update splits delete opstamp on an index."); + info!("update splits delete opstamp on an index"); let create_index_request = CreateIndexRequest::try_from_index_config(index_config.clone()).unwrap(); let index_uid: IndexUid = metastore diff --git a/quickwit/quickwit-opentelemetry/src/otlp/logs.rs b/quickwit/quickwit-opentelemetry/src/otlp/logs.rs index d2f6e604227..218b3879c68 100644 --- a/quickwit/quickwit-opentelemetry/src/otlp/logs.rs +++ b/quickwit/quickwit-opentelemetry/src/otlp/logs.rs @@ -244,7 +244,7 @@ impl OtlpGrpcLogsService { }) .await .map_err(|join_error| { - error!("Failed to parse log records: {join_error:?}"); + error!(error=?join_error, "failed to parse log records"); Status::internal("failed to parse log records") })??; if num_log_records == num_parse_errors { @@ -385,8 +385,8 @@ impl OtlpGrpcLogsService { let mut doc_batch = DocBatchBuilder::new(OTEL_LOGS_INDEX_ID.to_string()).json_writer(); for log_record in log_records { if let Err(error) = doc_batch.ingest_doc(&log_record.0) { - error!(error=?error, "Failed to JSON serialize span."); - error_message = format!("Failed to JSON serialize span: {error:?}"); + error!(error=?error, "failed to JSON serialize log record"); + error_message = format!("failed to JSON serialize log record: {error:?}"); num_parse_errors += 1; } } diff --git a/quickwit/quickwit-opentelemetry/src/otlp/traces.rs b/quickwit/quickwit-opentelemetry/src/otlp/traces.rs index 6a67ccfeb82..8be4544aaf1 100644 --- a/quickwit/quickwit-opentelemetry/src/otlp/traces.rs +++ b/quickwit/quickwit-opentelemetry/src/otlp/traces.rs @@ -408,7 +408,7 @@ impl FromStr for SpanKind { "5" | "consumer" | "SPAN_KIND_CONSUMER" => 5, _ => { if !span_kind.is_empty() { - warn!("Unexpected span kind: {}", span_kind); + warn!(span_kind=%span_kind, "unexpected span kind"); } return Err(format!("Unexpected span kind: {span_kind}")); } @@ -702,7 +702,7 @@ impl OtlpGrpcTracesService { }) .await .map_err(|join_error| { - error!("Failed to parse spans: {join_error:?}"); + error!(join_error=?join_error, "failed to parse spans"); Status::internal("Failed to
parse spans.") })??; if num_spans == 0 { @@ -747,7 +747,7 @@ impl OtlpGrpcTracesService { DocBatchBuilder::new(OTEL_TRACES_INDEX_ID.to_string()).json_writer(); for span in spans { if let Err(error) = doc_batch_builder.ingest_doc(&span.0) { - error!(error=?error, "failed to JSON serialize span."); + error!(error=?error, "failed to JSON serialize span"); error_message = format!("failed to JSON serialize span: {error:?}"); num_parse_errors += 1; } diff --git a/quickwit/quickwit-search/src/cluster_client.rs b/quickwit/quickwit-search/src/cluster_client.rs index 87f7eb1f4a3..b6ee4d14103 100644 --- a/quickwit/quickwit-search/src/cluster_client.rs +++ b/quickwit/quickwit-search/src/cluster_client.rs @@ -187,7 +187,7 @@ impl ClusterClient { // We only log a warning as it might be that we are just running in a // single node cluster. // (That's odd though, the node running this code should be in the pool too) - warn!("No other node available to replicate scroll context."); + warn!("no other node available to replicate scroll context"); return; } diff --git a/quickwit/quickwit-search/src/fetch_docs.rs b/quickwit/quickwit-search/src/fetch_docs.rs index 7ef96fc501d..2fa4c892b9f 100644 --- a/quickwit/quickwit-search/src/fetch_docs.rs +++ b/quickwit/quickwit-search/src/fetch_docs.rs @@ -87,7 +87,7 @@ async fn fetch_docs_to_map( .iter() .map(|split| split.split_id.clone()) .collect_vec(); - error!(split_ids = ?split_ids, error = ?error, "Error when fetching docs in splits."); + error!(split_ids = ?split_ids, error = ?error, "error when fetching docs in splits"); anyhow::anyhow!( "error when fetching docs for splits {:?}: {:?}", split_ids, diff --git a/quickwit/quickwit-search/src/root.rs b/quickwit/quickwit-search/src/root.rs index 2a916f00fa4..07489720df9 100644 --- a/quickwit/quickwit-search/src/root.rs +++ b/quickwit/quickwit-search/src/root.rs @@ -506,7 +506,7 @@ pub(crate) async fn search_partial_hits_phase( "Merged leaf search response." 
); if !leaf_search_response.failed_splits.is_empty() { - error!(failed_splits = ?leaf_search_response.failed_splits, "Leaf search response contains at least one failed split."); + error!(failed_splits = ?leaf_search_response.failed_splits, "leaf search response contains at least one failed split"); let errors: String = leaf_search_response.failed_splits.iter().join(", "); return Err(SearchError::Internal(errors)); } diff --git a/quickwit/quickwit-search/src/thread_pool.rs b/quickwit/quickwit-search/src/thread_pool.rs index a4be063ba0c..2d763cbc8ce 100644 --- a/quickwit/quickwit-search/src/thread_pool.rs +++ b/quickwit/quickwit-search/src/thread_pool.rs @@ -29,7 +29,7 @@ fn search_thread_pool() -> &'static rayon::ThreadPool { rayon::ThreadPoolBuilder::new() .thread_name(|thread_id| format!("quickwit-search-{thread_id}")) .panic_handler(|_my_panic| { - error!("Task running in the quickwit search pool panicked."); + error!("task running in the quickwit search pool panicked"); }) .build() .expect("Failed to spawn the spawning pool") diff --git a/quickwit/quickwit-serve/src/format.rs b/quickwit/quickwit-serve/src/format.rs index 5f18294366f..4f1436e24e6 100644 --- a/quickwit/quickwit-serve/src/format.rs +++ b/quickwit/quickwit-serve/src/format.rs @@ -44,10 +44,10 @@ impl BodyFormat { fn value_to_vec(&self, value: &impl serde::Serialize) -> Result, ()> { match &self { Self::Json => serde_json::to_vec(value).map_err(|_| { - tracing::error!("Error: the response serialization failed."); + tracing::error!("the response serialization failed"); }), Self::PrettyJson => serde_json::to_vec_pretty(value).map_err(|_| { - tracing::error!("Error: the response serialization failed."); + tracing::error!("the response serialization failed"); }), } } diff --git a/quickwit/quickwit-serve/src/health_check_api/handler.rs b/quickwit/quickwit-serve/src/health_check_api/handler.rs index f5733d2b0d2..430ad014a48 100644 --- a/quickwit/quickwit-serve/src/health_check_api/handler.rs +++ b/quickwit/quickwit-serve/src/health_check_api/handler.rs @@ -79,13 +79,13 @@ async fn get_liveness( if let Some(indexer_service) = indexer_service_opt { if !indexer_service.ask(Healthz).await.unwrap_or(false) { - error!("The indexer service is unhealthy."); + error!("the indexer service is unhealthy"); is_live = false; } } if let Some(janitor_service) = janitor_service_opt { if !janitor_service.ask(Healthz).await.unwrap_or(false) { - error!("The janitor service is unhealthy."); + error!("the janitor service is unhealthy"); is_live = false; } } diff --git a/quickwit/quickwit-serve/src/lib.rs b/quickwit/quickwit-serve/src/lib.rs index e2cd13ce49d..83ffb4386a4 100644 --- a/quickwit/quickwit-serve/src/lib.rs +++ b/quickwit/quickwit-serve/src/lib.rs @@ -292,7 +292,7 @@ pub async fn serve_quickwit( .await .is_err() { - error!("No metastore service found among cluster members, stopping server."); + error!("no metastore service found among cluster members, stopping server"); anyhow::bail!( "failed to start server: no metastore service was found among cluster \ members. 
try running Quickwit with additional metastore service `quickwit \ @@ -472,13 +472,13 @@ pub async fn serve_quickwit( let (grpc_readiness_trigger_tx, grpc_readiness_signal_rx) = oneshot::channel::<()>(); let grpc_readiness_trigger = Box::pin(async move { if grpc_readiness_trigger_tx.send(()).is_err() { - debug!("gRPC server readiness signal receiver was dropped."); + debug!("gRPC server readiness signal receiver was dropped"); } }); let (grpc_shutdown_trigger_tx, grpc_shutdown_signal_rx) = oneshot::channel::<()>(); let grpc_shutdown_signal = Box::pin(async move { if grpc_shutdown_signal_rx.await.is_err() { - debug!("gRPC server shutdown trigger sender was dropped."); + debug!("gRPC server shutdown trigger sender was dropped"); } }); let grpc_server = grpc::start_grpc_server( @@ -491,13 +491,13 @@ pub async fn serve_quickwit( let (rest_readiness_trigger_tx, rest_readiness_signal_rx) = oneshot::channel::<()>(); let rest_readiness_trigger = Box::pin(async move { if rest_readiness_trigger_tx.send(()).is_err() { - debug!("REST server readiness signal receiver was dropped."); + debug!("REST server readiness signal receiver was dropped"); } }); let (rest_shutdown_trigger_tx, rest_shutdown_signal_rx) = oneshot::channel::<()>(); let rest_shutdown_signal = Box::pin(async move { if rest_shutdown_signal_rx.await.is_err() { - debug!("REST server shutdown trigger sender was dropped."); + debug!("REST server shutdown trigger sender was dropped"); } }); let rest_server = rest::start_rest_server( @@ -524,10 +524,10 @@ pub async fn serve_quickwit( let actor_exit_statuses = universe.quit().await; if grpc_shutdown_trigger_tx.send(()).is_err() { - debug!("gRPC server shutdown signal receiver was dropped."); + debug!("gRPC server shutdown signal receiver was dropped"); } if rest_shutdown_trigger_tx.send(()).is_err() { - debug!("REST server shutdown signal receiver was dropped."); + debug!("REST server shutdown signal receiver was dropped"); } actor_exit_statuses }); @@ -788,11 +788,11 @@ async fn node_readiness_reporting_task( let node_ready = match metastore.check_connectivity().await { Ok(()) => { - debug!(metastore_endpoints=?metastore.endpoints(), "metastore service is available."); + debug!(metastore_endpoints=?metastore.endpoints(), "metastore service is available"); true } Err(error) => { - warn!(metastore_endpoints=?metastore.endpoints(), error=?error, "metastore service is unavailable."); + warn!(metastore_endpoints=?metastore.endpoints(), error=?error, "metastore service is unavailable"); false } }; diff --git a/quickwit/quickwit-serve/src/search_api/rest_handler.rs b/quickwit/quickwit-serve/src/search_api/rest_handler.rs index a38d4c3439f..a5e1e4acb6c 100644 --- a/quickwit/quickwit-serve/src/search_api/rest_handler.rs +++ b/quickwit/quickwit-serve/src/search_api/rest_handler.rs @@ -456,7 +456,7 @@ async fn search_stream_endpoint( // trailer. Thus we also call `sender.abort()` so that the // client will see something wrong happened. But he will // need to look at the logs to understand that. 
- tracing::error!(error=?error, "Error when streaming search results."); + tracing::error!(error=?error, "error when streaming search results"); let header_value_str = format!("Error when streaming search results: {error:?}."); let header_value = HeaderValue::from_str(header_value_str.as_str()) diff --git a/quickwit/quickwit-storage/src/local_file_storage.rs b/quickwit/quickwit-storage/src/local_file_storage.rs index 39e0f11d513..69b6071469c 100644 --- a/quickwit/quickwit-storage/src/local_file_storage.rs +++ b/quickwit/quickwit-storage/src/local_file_storage.rs @@ -246,7 +246,7 @@ impl Storage for LocalFileStorage { self.delete_single_file(path).await?; if let Some(parent) = path.parent() { if let Err(error) = delete_all_dirs_if_empty(&self.root, parent).await { - warn!(error=?error, path=%path.display(), "Failed to delete directory."); + warn!(error=?error, path=%path.display(), "failed to delete directory"); } } Ok(()) @@ -293,7 +293,7 @@ impl Storage for LocalFileStorage { // first. for parent_path in parent_paths.into_iter().rev() { if let Err(error) = delete_all_dirs_if_empty(&self.root, parent_path).await { - warn!(error=?error, path=%parent_path.display(), "Failed to delete directory."); + warn!(error=?error, path=%parent_path.display(), "failed to delete directory"); } } if failures.is_empty() { diff --git a/quickwit/quickwit-storage/src/object_storage/s3_compatible_storage.rs b/quickwit/quickwit-storage/src/object_storage/s3_compatible_storage.rs index cbca48b5ad6..b8a9b963085 100644 --- a/quickwit/quickwit-storage/src/object_storage/s3_compatible_storage.rs +++ b/quickwit/quickwit-storage/src/object_storage/s3_compatible_storage.rs @@ -113,7 +113,7 @@ fn get_credentials_provider( &s3_storage_config.secret_access_key, ) { (Some(access_key_id), Some(secret_access_key)) => { - info!("Using S3 credentials defined in storage config."); + info!("using S3 credentials defined in storage config"); let credentials = Credentials::from_keys(access_key_id, secret_access_key, None); let credentials_provider = SharedCredentialsProvider::new(credentials); Some(credentials_provider) @@ -124,7 +124,7 @@ fn get_credentials_provider( fn get_region(s3_storage_config: &S3StorageConfig) -> Option { s3_storage_config.region.clone().map(|region| { - info!(region=%region, "Using S3 region defined in storage config."); + info!(region=%region, "using S3 region defined in storage config"); Region::new(region) }) } @@ -145,7 +145,7 @@ async fn create_s3_client(s3_storage_config: &S3StorageConfig) -> S3Client { s3_config.set_timeout_config(aws_config.timeout_config().cloned()); if let Some(endpoint) = s3_storage_config.endpoint() { - info!(endpoint=%endpoint, "Using S3 endpoint defined in storage config or environment variable."); + info!(endpoint=%endpoint, "using S3 endpoint defined in storage config or environment variable"); s3_config.set_endpoint_url(Some(endpoint)); } S3Client::from_conf(s3_config.build()) diff --git a/quickwit/quickwit-storage/src/split_cache/download_task.rs b/quickwit/quickwit-storage/src/split_cache/download_task.rs index d0e6acd2ca0..b0cab44cb11 100644 --- a/quickwit/quickwit-storage/src/split_cache/download_task.rs +++ b/quickwit/quickwit-storage/src/split_cache/download_task.rs @@ -42,7 +42,7 @@ pub(crate) fn delete_evicted_splits(root_path: &Path, splits_to_delete: &[Ulid]) if let Err(_io_err) = std::fs::remove_file(&split_file_path) { // This is an pretty critical error. The split size is not tracked anymore at this // point. 
- error!(path=%split_file_path.display(), "Failed to remove split file from cache directory. This is critical as the file is now not taken in account in the cache size limits."); + error!(path=%split_file_path.display(), "failed to remove split file from cache directory. This is critical as the file is now not taken into account in the cache size limits"); } } } diff --git a/quickwit/quickwit-storage/src/split_cache/mod.rs b/quickwit/quickwit-storage/src/split_cache/mod.rs index 59d4cde360c..e23289307b3 100644 --- a/quickwit/quickwit-storage/src/split_cache/mod.rs +++ b/quickwit/quickwit-storage/src/split_cache/mod.rs @@ -78,7 +78,7 @@ impl SplitCache { // their cleanup. It is important to remove it. if let Err(io_err) = std::fs::remove_file(&path) { if io_err.kind() != io::ErrorKind::NotFound { - error!(path=?path, "Failed to remove temporary file."); + error!(path=?path, "failed to remove temporary file"); } } } @@ -86,11 +86,11 @@ impl SplitCache { if let Some(split_ulid) = split_id_from_path(&path) { existing_splits.insert(split_ulid, meta.len()); } else { - warn!(path=%path.display(), ".split file with invalid ulid in split cache directory. Ignoring."); + warn!(path=%path.display(), ".split file with invalid ulid in split cache directory, ignoring"); } } _ => { - warn!(path=%path.display(), "Unknown file in split cache directory. Ignoring."); + warn!(path=%path.display(), "unknown file in split cache directory, ignoring"); } } } @@ -135,11 +135,11 @@ impl SplitCache { let mut split_table = self.split_table.lock().unwrap(); for report_split in report_splits { let Ok(split_ulid) = Ulid::from_str(&report_split.split_id) else { - error!(split_id=%report_split.split_id, "Received invalid split ulid. Ignoring."); + error!(split_id=%report_split.split_id, "received invalid split ulid: ignoring"); continue; }; let Ok(storage_uri) = Uri::from_str(&report_split.storage_uri) else { - error!(storage_uri=%report_split.storage_uri, "Received invalid storage uri. Ignoring."); + error!(storage_uri=%report_split.storage_uri, "received invalid storage uri: ignoring"); continue; }; split_table.report(split_ulid, storage_uri); diff --git a/quickwit/quickwit-telemetry/src/lib.rs b/quickwit/quickwit-telemetry/src/lib.rs index 80d7a546930..38c1948656a 100644 --- a/quickwit/quickwit-telemetry/src/lib.rs +++ b/quickwit/quickwit-telemetry/src/lib.rs @@ -42,7 +42,7 @@ pub fn start_telemetry_loop(quickwit_info: QuickwitTelemetryInfo) -> Option) { fn create_http_client() -> Option { if is_telemetry_disabled() { - info!("telemetry to quickwit is disabled."); + info!("telemetry to quickwit is disabled"); return None; } let client = HttpClient::try_new()?; - info!("telemetry to {} is enabled.", client.endpoint()); + info!("telemetry to {} is enabled", client.endpoint()); Some(client) } diff --git a/quickwit/scripts/check_log_format.sh b/quickwit/scripts/check_log_format.sh new file mode 100755 index 00000000000..96f2d2ec061 --- /dev/null +++ b/quickwit/scripts/check_log_format.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +RESULT=0 + +for file in $(git ls-files | egrep "src/.*\.rs$") +do + LOG_STARTING_WITH_UPPERCASE=$(egrep -n "(warn|info|error|debug)\!\(\"[A-Z][a-z]" "$file") + DIFFRESULT=$? + LOG_ENDING_WITH_PERIOD=$(egrep -n "(warn|info|error|debug)\!.*\.\"\);" "$file") + DIFFRESULT=$(($DIFFRESULT && $?)) + if [ $DIFFRESULT -eq 0 ]; then + echo "====================" + echo "$file" + echo "$LOG_STARTING_WITH_UPPERCASE" + echo "$LOG_ENDING_WITH_PERIOD" + RESULT=1 + fi +done + +exit $RESULT
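For reference, a quick way to exercise the new check on its own, assuming the working directory is quickwit/ (the directory from which the Makefile's `fmt` target invokes it):

    # print offending files/lines and exit non-zero if any log message
    # starts with an uppercase letter or ends with a period
    bash scripts/check_log_format.sh

The `fmt` target chains this check after `cargo +nightly fmt` and the license-header check, so running `make fmt` covers it as well.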