diff --git a/.github/workflows/apidoc.yml b/.github/workflows/apidoc.yml index d0392a224176..7c3ed31d11a0 100644 --- a/.github/workflows/apidoc.yml +++ b/.github/workflows/apidoc.yml @@ -13,7 +13,7 @@ on: name: Build API docs env: - RUST_TOOLCHAIN: nightly-2024-04-20 + RUST_TOOLCHAIN: nightly-2024-08-07 jobs: apidoc: diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml index a81899c5175f..e17305a47bb6 100644 --- a/.github/workflows/develop.yml +++ b/.github/workflows/develop.yml @@ -30,7 +30,7 @@ concurrency: cancel-in-progress: true env: - RUST_TOOLCHAIN: nightly-2024-04-20 + RUST_TOOLCHAIN: nightly-2024-08-07 jobs: check-typos-and-docs: diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml index 2f7862973a9e..e268c7d5c5a8 100644 --- a/.github/workflows/nightly-ci.yml +++ b/.github/workflows/nightly-ci.yml @@ -10,7 +10,7 @@ concurrency: cancel-in-progress: true env: - RUST_TOOLCHAIN: nightly-2024-04-20 + RUST_TOOLCHAIN: nightly-2024-08-07 permissions: issues: write diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 842311cd763b..5fd36f1d7dbd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -82,7 +82,7 @@ on: # Use env variables to control all the release process. env: # The arguments of building greptime. - RUST_TOOLCHAIN: nightly-2024-04-20 + RUST_TOOLCHAIN: nightly-2024-08-07 CARGO_PROFILE: nightly # Controls whether to run tests, include unit-test, integration-test and sqlness. diff --git a/Cargo.lock b/Cargo.lock index 2ebe6c8d6bf8..c168fa0f387e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4568,9 +4568,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "human-panic" -version = "1.2.3" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4f016c89920bbb30951a8405ecacbb4540db5524313b9445736e7e1855cf370" +checksum = "1c5a08ed290eac04006e21e63d32e90086b6182c7cd0452d10f4264def1fec9a" dependencies = [ "anstream", "anstyle", diff --git a/Cargo.toml b/Cargo.toml index 41db4a72a0a4..72763337dcde 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,7 @@ clippy.readonly_write_lock = "allow" rust.unknown_lints = "deny" # Remove this after https://github.com/PyO3/pyo3/issues/4094 rust.non_local_definitions = "allow" +rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } [workspace.dependencies] # We turn off default-features for some dependencies here so the workspaces which inherit them can diff --git a/rust-toolchain.toml b/rust-toolchain.toml index dc1d13276060..9005e8719e36 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,3 @@ [toolchain] -channel = "nightly-2024-04-20" +channel = "nightly-2024-08-07" + diff --git a/src/client/src/database.rs b/src/client/src/database.rs index 80dc51df2ef6..9ac97df63658 100644 --- a/src/client/src/database.rs +++ b/src/client/src/database.rs @@ -91,7 +91,7 @@ impl Database { /// /// - the name of database when using GreptimeDB standalone or cluster /// - the name provided by GreptimeCloud or other multi-tenant GreptimeDB - /// environment + /// environment pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self { Self { catalog: String::default(), diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml index 405c855c8ac8..71841aabeccb 100644 --- a/src/cmd/Cargo.toml +++ b/src/cmd/Cargo.toml @@ -51,7 +51,7 @@ file-engine.workspace = true flow.workspace = true frontend = { workspace = true,
default-features = false } futures.workspace = true -human-panic = "1.2.2" +human-panic = "2.0" lazy_static.workspace = true meta-client.workspace = true meta-srv.workspace = true diff --git a/src/cmd/src/bin/greptime.rs b/src/cmd/src/bin/greptime.rs index f49cf2dff097..f36d0f1331f8 100644 --- a/src/cmd/src/bin/greptime.rs +++ b/src/cmd/src/bin/greptime.rs @@ -139,13 +139,10 @@ async fn start(cli: Command) -> Result<()> { } fn setup_human_panic() { - let metadata = human_panic::Metadata { - version: env!("CARGO_PKG_VERSION").into(), - name: "GreptimeDB".into(), - authors: Default::default(), - homepage: "https://github.com/GreptimeTeam/greptimedb/discussions".into(), - }; - human_panic::setup_panic!(metadata); + human_panic::setup_panic!( + human_panic::Metadata::new("GreptimeDB", env!("CARGO_PKG_VERSION")) + .homepage("https://github.com/GreptimeTeam/greptimedb/discussions") + ); common_telemetry::set_panic_hook(); } diff --git a/src/common/catalog/src/lib.rs b/src/common/catalog/src/lib.rs index 8ef85a300f7f..7eacc9931e53 100644 --- a/src/common/catalog/src/lib.rs +++ b/src/common/catalog/src/lib.rs @@ -48,19 +48,19 @@ pub fn build_db_string(catalog: &str, schema: &str) -> String { /// The database name may come from different sources: /// /// - MySQL `schema` name in MySQL protocol login request: it's optional and user -/// and switch database using `USE` command +/// and switch database using `USE` command /// - Postgres `database` parameter in Postgres wire protocol, required /// - HTTP RESTful API: the database parameter, optional /// - gRPC: the dbname field in header, optional but has a higher priority than -/// original catalog/schema +/// original catalog/schema /// /// When database name is provided, we attempt to parse catalog and schema from /// it. We assume the format `[<catalog>-]<schema>`: /// /// - If `[<catalog>-]` part is not provided, we use whole database name as -/// schema name +/// schema name /// - if `[<catalog>-]` is provided, we split database name with `-` and use -/// `<catalog>` and `<schema>`. +/// `<catalog>` and `<schema>`. pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (String, String) { match parse_optional_catalog_and_schema_from_db_string(db) { (Some(catalog), schema) => (catalog, schema), diff --git a/src/common/function/src/function.rs b/src/common/function/src/function.rs index 433f54b6ce7d..59c64a8d774d 100644 --- a/src/common/function/src/function.rs +++ b/src/common/function/src/function.rs @@ -32,7 +32,7 @@ pub struct FunctionContext { impl FunctionContext { /// Create a mock [`FunctionContext`] for test.
- #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn mock() -> Self { Self { query_ctx: QueryContextBuilder::default().build().into(), diff --git a/src/common/function/src/scalars/aggregate/percentile.rs b/src/common/function/src/scalars/aggregate/percentile.rs index 231e0bf43a2c..2d3e5482fe38 100644 --- a/src/common/function/src/scalars/aggregate/percentile.rs +++ b/src/common/function/src/scalars/aggregate/percentile.rs @@ -75,7 +75,7 @@ where // to keep the not_greater length == floor+1 // so to ensure the peek of the not_greater is array[floor] // and the peek of the greater is array[floor+1] - let p = if let Some(p) = self.p { p } else { 0.0_f64 }; + let p = self.p.unwrap_or(0.0_f64); let floor = (((self.n - 1) as f64) * p / (100_f64)).floor(); if value <= *self.not_greater.peek().unwrap() { self.not_greater.push(value); diff --git a/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs b/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs index 3045ae8665dd..8afdf5c81674 100644 --- a/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs +++ b/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs @@ -245,7 +245,7 @@ mod test { ]; scipy_stats_norm_pdf.update_batch(&v).unwrap(); assert_eq!( - Value::from(0.17843340219081558), + Value::from(0.17843340219081552), scipy_stats_norm_pdf.evaluate().unwrap() ); diff --git a/src/common/function/src/state.rs b/src/common/function/src/state.rs index 55953b679422..89f130a0bbd0 100644 --- a/src/common/function/src/state.rs +++ b/src/common/function/src/state.rs @@ -28,7 +28,7 @@ pub struct FunctionState { impl FunctionState { /// Create a mock [`FunctionState`] for test. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn mock() -> Self { use std::sync::Arc; diff --git a/src/common/macro/src/lib.rs b/src/common/macro/src/lib.rs index 8b10b83e8689..8079be46a887 100644 --- a/src/common/macro/src/lib.rs +++ b/src/common/macro/src/lib.rs @@ -76,6 +76,7 @@ pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream { /// - `&ProcedureServiceHandlerRef` or `&TableMutationHandlerRef` or `FlowServiceHandlerRef` as the first argument, /// - `&QueryContextRef` as the second argument, and /// - `&[ValueRef<'_>]` as the third argument which is SQL function input values in each row. +/// /// Return type must be `common_query::error::Result`. /// /// # Example see `common/function/src/system/procedure_state.rs`. diff --git a/src/common/meta/src/lock_key.rs b/src/common/meta/src/lock_key.rs index ea5e9b5d334b..324733aae227 100644 --- a/src/common/meta/src/lock_key.rs +++ b/src/common/meta/src/lock_key.rs @@ -172,8 +172,8 @@ impl From for StringKey { /// /// Note: /// - Allows modification the corresponding region's [TableRouteValue](crate::key::table_route::TableRouteValue), -/// [TableDatanodeValue](crate::key::datanode_table::DatanodeTableValue) even if -/// it acquires the [RegionLock::Write] only without acquiring the [TableLock::Write]. +/// [TableDatanodeValue](crate::key::datanode_table::DatanodeTableValue) even if +/// it acquires the [RegionLock::Write] only without acquiring the [TableLock::Write]. /// /// - Should acquire [TableLock] of the table at same procedure. 
/// diff --git a/src/common/procedure/src/local.rs b/src/common/procedure/src/local.rs index 574fb612b246..63fc06270a10 100644 --- a/src/common/procedure/src/local.rs +++ b/src/common/procedure/src/local.rs @@ -51,7 +51,7 @@ const META_TTL: Duration = Duration::from_secs(60 * 10); /// [Notify] is not a condition variable, we can't guarantee the waiters are notified /// if they didn't call `notified()` before we signal the notify. So we /// 1. use dedicated notify for each condition, such as waiting for a lock, waiting -/// for children; +/// for children; /// 2. always use `notify_one` and ensure there are only one waiter. #[derive(Debug)] pub(crate) struct ProcedureMeta { diff --git a/src/common/test-util/src/lib.rs b/src/common/test-util/src/lib.rs index 08eeadc23301..75f3cb18781c 100644 --- a/src/common/test-util/src/lib.rs +++ b/src/common/test-util/src/lib.rs @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![feature(lazy_cell)] - use std::path::{Path, PathBuf}; use std::process::Command; use std::sync::LazyLock; diff --git a/src/flow/src/lib.rs b/src/flow/src/lib.rs index 738ed524ba04..d01e326427bb 100644 --- a/src/flow/src/lib.rs +++ b/src/flow/src/lib.rs @@ -17,7 +17,6 @@ //! It also contains definition of expression, adapter and plan, and internal state management. #![feature(let_chains)] -#![feature(duration_abs_diff)] #![allow(dead_code)] #![warn(clippy::missing_docs_in_private_items)] #![warn(clippy::too_many_lines)] diff --git a/src/log-store/src/kafka/util/record.rs b/src/log-store/src/kafka/util/record.rs index db43c05a447f..0f4e794a7318 100644 --- a/src/log-store/src/kafka/util/record.rs +++ b/src/log-store/src/kafka/util/record.rs @@ -40,8 +40,9 @@ pub(crate) const ESTIMATED_META_SIZE: usize = 256; /// - If the entry is able to fit into a Kafka record, it's converted into a Full record. /// /// - If the entry is too large to fit into a Kafka record, it's converted into a collection of records. +/// /// Those records must contain exactly one First record and one Last record, and potentially several -/// Middle records. There may be no Middle record. +/// Middle records. There may be no Middle record. #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)] pub enum RecordType { /// The record is self-contained, i.e. an entry's data is fully stored into this record. 
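Note on the doc-comment re-indentation that accounts for most of this diff (client/database.rs and common/catalog above, lock_key.rs and local.rs here, more below): it is one mechanical fix, presumably for clippy's new `doc_lazy_continuation` lint in the 1.80-era toolchain (the PR itself doesn't name the lint, so treat that attribution as an educated guess). A list item whose continuation line sits flush with the margin is parsed by rustdoc's Markdown as a "lazy" paragraph continuation, so the lint asks for an explicit indent. A minimal before/after sketch on a made-up item:

```rust
/// - the name provided by GreptimeCloud or other multi-tenant GreptimeDB
/// environment
// ^ before: the continuation line is flush with the margin; the new toolchain warns.

/// - the name provided by GreptimeCloud or other multi-tenant GreptimeDB
///   environment
// ^ after: a two-space indent unambiguously keeps the line inside the list item.
pub fn doc_style_example() {}
```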
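Relatedly, the `#![feature(lazy_cell)]` removal in common/test-util just above (and in src/sql/src/lib.rs below) works because `LazyLock`/`LazyCell` were stabilized in Rust 1.80, which this nightly already includes; the existing `use std::sync::LazyLock` keeps compiling unchanged. Same story for the dropped `duration_abs_diff` and `option_take_if` gates in src/flow and src/meta-srv, both stable by this nightly. A self-contained sketch (the static and path are invented for illustration):

```rust
use std::path::PathBuf;
use std::sync::LazyLock;

// Stable since Rust 1.80, so no `#![feature(lazy_cell)]` on this toolchain.
// The closure runs once, on first access, and the result is cached.
static TEST_DATA_DIR: LazyLock<PathBuf> =
    LazyLock::new(|| std::env::temp_dir().join("greptime-test-data"));

fn main() {
    println!("data dir: {}", TEST_DATA_DIR.display());
}
```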
diff --git a/src/log-store/src/raft_engine.rs b/src/log-store/src/raft_engine.rs index 86a46bb1a02f..fdd88a49f133 100644 --- a/src/log-store/src/raft_engine.rs +++ b/src/log-store/src/raft_engine.rs @@ -23,6 +23,7 @@ use store_api::logstore::entry::{Entry, NaiveEntry}; use store_api::logstore::provider::Provider; use store_api::storage::RegionId; +#[allow(renamed_and_removed_lints)] pub mod protos { include!(concat!(env!("OUT_DIR"), concat!("/", "protos/", "mod.rs"))); } diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs index c39cde24f3b0..44784ce32694 100644 --- a/src/meta-srv/src/lib.rs +++ b/src/meta-srv/src/lib.rs @@ -15,7 +15,6 @@ #![feature(async_closure)] #![feature(result_flattening)] #![feature(assert_matches)] -#![feature(option_take_if)] #![feature(extract_if)] pub mod bootstrap; diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs index 1404b8aca2b7..11dbebb182ee 100644 --- a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs +++ b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs @@ -33,8 +33,8 @@ impl UpdateMetadata { /// About the failure of updating the [TableRouteValue](common_meta::key::table_region::TableRegionValue): /// /// - There may be another [RegionMigrationProcedure](crate::procedure::region_migration::RegionMigrationProcedure) - /// that is executed concurrently for **other region**. - /// It will only update **other region** info. Therefore, It's safe to retry after failure. + /// that is executed concurrently for **other region**. + /// It will only update **other region** info. Therefore, It's safe to retry after failure. /// /// - There is no other DDL procedure executed concurrently for the current table. pub async fn downgrade_leader_region(&self, ctx: &mut Context) -> Result<()> { diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs index 63d9f1d52fdc..17cdabc6a0ac 100644 --- a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs +++ b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs @@ -122,7 +122,7 @@ impl UpdateMetadata { /// /// Abort(non-retry): /// - TableRoute or RegionRoute is not found. - /// Typically, it's impossible, there is no other DDL procedure executed concurrently for the current table. + /// Typically, it's impossible, there is no other DDL procedure executed concurrently for the current table. /// /// Retry: /// - Failed to update [TableRouteValue](common_meta::key::table_region::TableRegionValue). diff --git a/src/meta-srv/src/selector/weighted_choose.rs b/src/meta-srv/src/selector/weighted_choose.rs index 9e9f63abadfd..d3a555043285 100644 --- a/src/meta-srv/src/selector/weighted_choose.rs +++ b/src/meta-srv/src/selector/weighted_choose.rs @@ -27,6 +27,7 @@ pub trait WeightedChoose<Item>: Send + Sync { /// Note: /// 1. make sure weight_array is not empty. /// 2. the total weight is greater than 0. + /// /// Otherwise an error will be returned.
fn set_weight_array(&mut self, weight_array: Vec<WeightedItem<Item>>) -> Result<()>; diff --git a/src/meta-srv/src/service/store/cached_kv.rs b/src/meta-srv/src/service/store/cached_kv.rs index 8bf02dc0471e..ddf7b3e516f6 100644 --- a/src/meta-srv/src/service/store/cached_kv.rs +++ b/src/meta-srv/src/service/store/cached_kv.rs @@ -61,9 +61,9 @@ impl CheckLeader for RwLock { /// To use this cache, the following constraints must be followed: /// 1. The leader node can create this metadata. /// 2. The follower node can create this metadata. The leader node can lazily retrieve -/// the corresponding data through the caching loading mechanism. +/// the corresponding data through the caching loading mechanism. /// 3. Only the leader node can update this metadata, as the cache cannot detect -/// modifications made to the data on the follower node. +/// modifications made to the data on the follower node. /// 4. Only the leader node can delete this metadata for the same reason mentioned above. pub struct LeaderCachedKvBackend { check_leader: CheckLeaderRef, diff --git a/src/mito2/src/read/merge.rs b/src/mito2/src/read/merge.rs index 3a6173360ba2..b82738808016 100644 --- a/src/mito2/src/read/merge.rs +++ b/src/mito2/src/read/merge.rs @@ -31,7 +31,7 @@ use crate::read::{Batch, BatchReader, BoxedBatchReader, Source}; /// /// The merge reader merges [Batch]es from multiple sources that yield sorted batches. /// 1. Batch is ordered by primary key, time index, sequence desc, op type desc (we can -/// ignore op type as sequence is already unique). +/// ignore op type as sequence is already unique). /// 2. Batches from sources **must** not be empty. /// /// The reader won't concatenate batches. Each batch returned by the reader also doesn't diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs index dcc461ab0991..8985baae7064 100644 --- a/src/mito2/src/test_util.rs +++ b/src/mito2/src/test_util.rs @@ -960,6 +960,7 @@ pub fn build_rows(start: usize, end: usize) -> Vec<Row> { /// - `key`: A string key that is common across all rows. /// - `timestamps`: Array of timestamp values. /// - `fields`: Array of tuples where each tuple contains two optional i64 values, representing two optional float fields. +/// Returns a vector of `Row` each containing the key, two optional float fields, and a timestamp.
pub fn build_rows_with_fields( key: &str, diff --git a/src/object-store/src/layers/prometheus.rs b/src/object-store/src/layers/prometheus.rs index 29897db98711..fef83a91468a 100644 --- a/src/object-store/src/layers/prometheus.rs +++ b/src/object-store/src/layers/prometheus.rs @@ -159,9 +159,8 @@ impl LayeredAccess for PrometheusAccess { let create_res = self.inner.create_dir(path, args).await; timer.observe_duration(); - create_res.map_err(|e| { + create_res.inspect_err(|e| { increment_errors_total(Operation::CreateDir, e.kind()); - e }) } @@ -175,9 +174,8 @@ impl LayeredAccess for PrometheusAccess { .with_label_values(&[&self.scheme, Operation::Read.into_static(), path_label]) .start_timer(); - let (rp, r) = self.inner.read(path, args).await.map_err(|e| { + let (rp, r) = self.inner.read(path, args).await.inspect_err(|e| { increment_errors_total(Operation::Read, e.kind()); - e })?; Ok(( @@ -205,9 +203,8 @@ impl LayeredAccess for PrometheusAccess { .with_label_values(&[&self.scheme, Operation::Write.into_static(), path_label]) .start_timer(); - let (rp, r) = self.inner.write(path, args).await.map_err(|e| { + let (rp, r) = self.inner.write(path, args).await.inspect_err(|e| { increment_errors_total(Operation::Write, e.kind()); - e })?; Ok(( @@ -236,9 +233,8 @@ impl LayeredAccess for PrometheusAccess { let stat_res = self.inner.stat(path, args).await; timer.observe_duration(); - stat_res.map_err(|e| { + stat_res.inspect_err(|e| { increment_errors_total(Operation::Stat, e.kind()); - e }) } @@ -254,9 +250,8 @@ impl LayeredAccess for PrometheusAccess { let delete_res = self.inner.delete(path, args).await; timer.observe_duration(); - delete_res.map_err(|e| { + delete_res.inspect_err(|e| { increment_errors_total(Operation::Delete, e.kind()); - e }) } @@ -273,9 +268,8 @@ impl LayeredAccess for PrometheusAccess { let list_res = self.inner.list(path, args).await; timer.observe_duration(); - list_res.map_err(|e| { + list_res.inspect_err(|e| { increment_errors_total(Operation::List, e.kind()); - e }) } @@ -290,9 +284,8 @@ impl LayeredAccess for PrometheusAccess { let result = self.inner.batch(args).await; timer.observe_duration(); - result.map_err(|e| { + result.inspect_err(|e| { increment_errors_total(Operation::Batch, e.kind()); - e }) } @@ -308,9 +301,8 @@ impl LayeredAccess for PrometheusAccess { let result = self.inner.presign(path, args).await; timer.observe_duration(); - result.map_err(|e| { + result.inspect_err(|e| { increment_errors_total(Operation::Presign, e.kind()); - e }) } @@ -335,9 +327,8 @@ impl LayeredAccess for PrometheusAccess { timer.observe_duration(); - result.map_err(|e| { + result.inspect_err(|e| { increment_errors_total(Operation::BlockingCreateDir, e.kind()); - e }) } @@ -376,9 +367,8 @@ impl LayeredAccess for PrometheusAccess { ), ) }) - .map_err(|e| { + .inspect_err(|e| { increment_errors_total(Operation::BlockingRead, e.kind()); - e }) } @@ -417,9 +407,8 @@ impl LayeredAccess for PrometheusAccess { ), ) }) - .map_err(|e| { + .inspect_err(|e| { increment_errors_total(Operation::BlockingWrite, e.kind()); - e }) } @@ -442,9 +431,8 @@ impl LayeredAccess for PrometheusAccess { .start_timer(); let result = self.inner.blocking_stat(path, args); timer.observe_duration(); - result.map_err(|e| { + result.inspect_err(|e| { increment_errors_total(Operation::BlockingStat, e.kind()); - e }) } @@ -468,9 +456,8 @@ impl LayeredAccess for PrometheusAccess { let result = self.inner.blocking_delete(path, args); timer.observe_duration(); - result.map_err(|e| { + result.inspect_err(|e| { 
increment_errors_total(Operation::BlockingDelete, e.kind()); - e }) } @@ -494,9 +481,8 @@ impl LayeredAccess for PrometheusAccess { let result = self.inner.blocking_list(path, args); timer.observe_duration(); - result.map_err(|e| { + result.inspect_err(|e| { increment_errors_total(Operation::BlockingList, e.kind()); - e }) } } @@ -535,18 +521,16 @@ impl PrometheusMetricWrapper { impl oio::Read for PrometheusMetricWrapper { async fn read(&mut self) -> Result { - self.inner.read().await.map_err(|err| { + self.inner.read().await.inspect_err(|err| { increment_errors_total(self.op, err.kind()); - err }) } } impl oio::BlockingRead for PrometheusMetricWrapper { fn read(&mut self) -> opendal::Result { - self.inner.read().map_err(|err| { + self.inner.read().inspect_err(|err| { increment_errors_total(self.op, err.kind()); - err }) } } @@ -567,16 +551,14 @@ impl oio::Write for PrometheusMetricWrapper { } async fn close(&mut self) -> Result<()> { - self.inner.close().await.map_err(|err| { + self.inner.close().await.inspect_err(|err| { increment_errors_total(self.op, err.kind()); - err }) } async fn abort(&mut self) -> Result<()> { - self.inner.close().await.map_err(|err| { + self.inner.close().await.inspect_err(|err| { increment_errors_total(self.op, err.kind()); - err }) } } @@ -589,16 +571,14 @@ impl oio::BlockingWrite for PrometheusMetricWrapper { .map(|_| { self.bytes += bytes as u64; }) - .map_err(|err| { + .inspect_err(|err| { increment_errors_total(self.op, err.kind()); - err }) } fn close(&mut self) -> Result<()> { - self.inner.close().map_err(|err| { + self.inner.close().inspect_err(|err| { increment_errors_total(self.op, err.kind()); - err }) } } diff --git a/src/promql/src/extension_plan/empty_metric.rs b/src/promql/src/extension_plan/empty_metric.rs index 302510e63282..8478b5263646 100644 --- a/src/promql/src/extension_plan/empty_metric.rs +++ b/src/promql/src/extension_plan/empty_metric.rs @@ -47,7 +47,7 @@ use crate::extension_plan::Millisecond; /// Empty source plan that generate record batch with two columns: /// - time index column, computed from start, end and interval /// - value column, generated by the input expr. The expr should not -/// reference any column except the time index column. +/// reference any column except the time index column. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct EmptyMetric { start: Millisecond, diff --git a/src/promql/src/functions/holt_winters.rs b/src/promql/src/functions/holt_winters.rs index c063b02b7358..8196d00a7bf4 100644 --- a/src/promql/src/functions/holt_winters.rs +++ b/src/promql/src/functions/holt_winters.rs @@ -32,11 +32,12 @@ use crate::range_array::RangeArray; /// There are 3 variants of smoothing functions: /// 1) "Simple exponential smoothing": only the `level` component (the weighted average of the observations) is used to make forecasts. -/// This method is applied for time-series data that does not exhibit trend or seasonality. +/// This method is applied for time-series data that does not exhibit trend or seasonality. /// 2) "Holt's linear method" (a.k.a. "double exponential smoothing"): `level` and `trend` components are used to make forecasts. -/// This method is applied for time-series data that exhibits trend but not seasonality. +/// This method is applied for time-series data that exhibits trend but not seasonality. /// 3) "Holt-Winter's method" (a.k.a. "triple exponential smoothing"): `level`, `trend`, and `seasonality` are used to make forecasts. 
-/// This method is applied for time-series data that exhibits both trend and seasonality. +/// +/// This method is applied for time-series data that exhibits both trend and seasonality. /// /// In order to keep the parity with the Prometheus functions we had to follow the same naming ("HoltWinters"), however /// the "Holt's linear"("double exponential smoothing") suits better and reflects implementation. diff --git a/src/puffin/src/file_format.rs b/src/puffin/src/file_format.rs index 2cb77c8c242d..aa273f94e9b9 100644 --- a/src/puffin/src/file_format.rs +++ b/src/puffin/src/file_format.rs @@ -34,7 +34,7 @@ //! - bit 0 (lowest bit): whether `FooterPayload` is compressed //! - all other bits are reserved for future use and should be set to 0 on write //! * all other bytes are reserved for future use and should be set to 0 on write -//! A 4 byte integer is always signed, in a two’s complement representation, stored little-endian. +//! A 4 byte integer is always signed, in a two’s complement representation, stored little-endian. //! //! ## Footer Payload //! diff --git a/src/query/src/promql/planner.rs b/src/query/src/promql/planner.rs index 02af7c28987e..964fd50e7c06 100644 --- a/src/query/src/promql/planner.rs +++ b/src/query/src/promql/planner.rs @@ -666,6 +666,7 @@ impl PromPlanner { /// Name rule: /// - if `name` is some, then the matchers MUST NOT contain `__name__` matcher. /// - if `name` is none, then the matchers MAY contain NONE OR MULTIPLE `__name__` matchers. + #[allow(clippy::mutable_key_type)] fn preprocess_label_matchers( &mut self, label_matchers: &Matchers, diff --git a/src/query/src/query_engine/context.rs b/src/query/src/query_engine/context.rs index 433c39119eb0..d8c110d2f27c 100644 --- a/src/query/src/query_engine/context.rs +++ b/src/query/src/query_engine/context.rs @@ -70,7 +70,7 @@ impl QueryEngineContext { } /// Mock an engine context for unit tests. - #[cfg(any(test, feature = "test"))] + #[cfg(test)] pub fn mock() -> Self { use common_base::Plugins; use session::context::QueryContext; diff --git a/src/query/src/sql.rs b/src/query/src/sql.rs index 6b2e565c1fa6..4aa6dca17a62 100644 --- a/src/query/src/sql.rs +++ b/src/query/src/sql.rs @@ -1018,9 +1018,9 @@ pub fn file_column_schemas_to_table( /// /// More specifically, for each column seen in the table schema, /// - If the same column does exist in the file schema, it checks if the data -/// type of the file column can be casted into the form of the table column. +/// type of the file column can be casted into the form of the table column. /// - If the same column does not exist in the file schema, it checks if the -/// table column is nullable or has a default constraint. +/// table column is nullable or has a default constraint. pub fn check_file_to_table_schema_compatibility( file_column_schemas: &[ColumnSchema], table_column_schemas: &[ColumnSchema], diff --git a/src/script/benches/py_benchmark.rs b/src/script/benches/py_benchmark.rs index b5c3d590d437..1750aadcebf8 100644 --- a/src/script/benches/py_benchmark.rs +++ b/src/script/benches/py_benchmark.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::any::Any; use std::collections::HashMap; use std::sync::Arc; @@ -31,8 +32,9 @@ use tokio::runtime::Runtime; static SCRIPT_ENGINE: Lazy = Lazy::new(sample_script_engine); static LOCAL_RUNTIME: OnceCell<Runtime> = OnceCell::new(); fn get_local_runtime() -> std::thread::Result<&'static Runtime> { - let rt = LOCAL_RUNTIME - .get_or_try_init(|| tokio::runtime::Runtime::new().map_err(|e| Box::new(e) as _))?; + let rt = LOCAL_RUNTIME.get_or_try_init(|| { + tokio::runtime::Runtime::new().map_err(|e| Box::new(e) as Box<dyn Any + Send>) + })?; Ok(rt) } /// a terrible hack to call async from sync by: diff --git a/src/script/src/python/pyo3/copr_impl.rs b/src/script/src/python/pyo3/copr_impl.rs index 2025c9d3a27d..bc8c8582d74b 100644 --- a/src/script/src/python/pyo3/copr_impl.rs +++ b/src/script/src/python/pyo3/copr_impl.rs @@ -175,7 +175,7 @@ coprocessor = copr /// constants will be broadcast to length of `col_len` /// accept and convert if obj is of two types: /// 1. tuples of PyVector/PyList of literals/single literal of same type -/// or a mixed tuple of PyVector and PyList of same type Literals +/// or a mixed tuple of PyVector and PyList of same type Literals /// 2. a single PyVector /// 3. a PyList of same type Literals /// 4. a single constant, will be expanded to a PyVector of length of `col_len` diff --git a/src/servers/src/grpc/greptime_handler.rs b/src/servers/src/grpc/greptime_handler.rs index 2abf5efaf8a7..f6bafde16d8d 100644 --- a/src/servers/src/grpc/greptime_handler.rs +++ b/src/servers/src/grpc/greptime_handler.rs @@ -110,9 +110,8 @@ impl GreptimeRequestHandler { .spawn(result_future) .await .context(JoinTaskSnafu) - .map_err(|e| { + .inspect_err(|e| { timer.record(e.status_code()); - e })? } None => result_future.await, @@ -160,11 +159,10 @@ pub(crate) async fn auth( name: "Token AuthScheme".to_string(), }), } - .map_err(|e| { + .inspect_err(|e| { METRIC_AUTH_FAILURE .with_label_values(&[e.status_code().as_ref()]) .inc(); - e }) } diff --git a/src/servers/src/metrics/jemalloc.rs b/src/servers/src/metrics/jemalloc.rs index 6b977301360f..0efb7cc031c9 100644 --- a/src/servers/src/metrics/jemalloc.rs +++ b/src/servers/src/metrics/jemalloc.rs @@ -42,11 +42,10 @@ pub(crate) static JEMALLOC_COLLECTOR: Lazy<Option<JemallocCollector>> = Lazy::ne e }) .ok(); - collector.map(|c| { + collector.inspect(|c| { if let Err(e) = c.update() { error!(e; "Failed to update jemalloc metrics"); }; - c }) }); diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs index 88fdc20f03bd..63faa22d54c3 100644 --- a/src/servers/src/mysql/federated.rs +++ b/src/servers/src/mysql/federated.rs @@ -196,7 +196,7 @@ fn select_variable(query: &str, query_context: QueryContextRef) -> Option /// - since the name are case-insensitive, we transform them to lowercase for -/// better sql usability +/// better sql usability /// - replace `.` and `-` with `_` fn normalize_otlp_name(name: &str) -> String { - name.to_lowercase().replace(|c| c == '.'
|| c == '-', "_") + name.to_lowercase().replace(['.', '-'], "_") } /// Convert OpenTelemetry metrics to GreptimeDB insert requests @@ -174,7 +174,7 @@ fn encode_gauge( scope_attrs: Option<&Vec<KeyValue>>, ) -> Result<()> { let table = table_writer.get_or_default_table_data( - &normalize_otlp_name(name), + normalize_otlp_name(name), APPROXIMATE_COLUMN_COUNT, gauge.data_points.len(), ); @@ -208,7 +208,7 @@ fn encode_sum( scope_attrs: Option<&Vec<KeyValue>>, ) -> Result<()> { let table = table_writer.get_or_default_table_data( - &normalize_otlp_name(name), + normalize_otlp_name(name), APPROXIMATE_COLUMN_COUNT, sum.data_points.len(), ); @@ -237,7 +237,7 @@ const HISTOGRAM_LE_COLUMN: &str = "le"; /// The implementation has been following Prometheus histogram table format: /// /// - A `%metric%_bucket` table including `greptime_le` tag that stores bucket upper -/// limit, and `greptime_value` for bucket count +/// limit, and `greptime_value` for bucket count /// - A `%metric%_sum` table storing sum of samples /// - A `%metric%_count` table storing count of samples. /// @@ -358,7 +358,7 @@ fn encode_summary( scope_attrs: Option<&Vec<KeyValue>>, ) -> Result<()> { let table = table_writer.get_or_default_table_data( - &normalize_otlp_name(name), + normalize_otlp_name(name), APPROXIMATE_COLUMN_COUNT, summary.data_points.len(), ); @@ -377,7 +377,7 @@ fn encode_summary( for quantile in &data_point.quantile_values { row_writer::write_f64( table, - &format!("greptime_p{:02}", quantile.quantile * 100f64), + format!("greptime_p{:02}", quantile.quantile * 100f64), quantile.value, &mut row, )?; diff --git a/src/sql/src/lib.rs b/src/sql/src/lib.rs index 283ebb50ec32..47fcf72cb7a7 100644 --- a/src/sql/src/lib.rs +++ b/src/sql/src/lib.rs @@ -15,7 +15,6 @@ #![feature(box_patterns)] #![feature(assert_matches)] #![feature(let_chains)] -#![feature(lazy_cell)] pub mod ast; pub mod dialect; diff --git a/src/sql/src/statements/transform/expand_interval.rs b/src/sql/src/statements/transform/expand_interval.rs index 09d0713a8d54..084a938c8f78 100644 --- a/src/sql/src/statements/transform/expand_interval.rs +++ b/src/sql/src/statements/transform/expand_interval.rs @@ -57,8 +57,9 @@ lazy_static! { /// - `ms` for `milliseconds` /// - `us` for `microseconds` /// - `ns` for `nanoseconds` +/// /// Required for scenarios that use the shortened version of `INTERVAL`, -/// f.e `SELECT INTERVAL '1h'` or `SELECT INTERVAL '3w2d'` +/// f.e `SELECT INTERVAL '1h'` or `SELECT INTERVAL '3w2d'` pub(crate) struct ExpandIntervalTransformRule; impl TransformRule for ExpandIntervalTransformRule { @@ -145,10 +146,11 @@ fn update_existing_interval_with_value(interval: &Interval, value: Box<Expr>) -> /// Normalizes an interval expression string into the sql-compatible format. /// This function handles 2 types of input: /// 1. Abbreviated interval strings (e.g., "1y2mo3d") -/// Returns an interval's full name (e.g., "years", "hours", "minutes") according to the `INTERVAL_ABBREVIATION_MAPPING` -/// If the `interval_str` contains whitespaces, the interval name is considered to be in a full form. /// 2.
ISO 8601 format strings (e.g., "P1Y2M3D"), case/sign independent -/// Returns a number of milliseconds corresponding to ISO 8601 (e.g., "36525000 milliseconds") +/// Returns a number of milliseconds corresponding to ISO 8601 (e.g., "36525000 milliseconds") +/// /// Note: Hybrid format "1y 2 days 3h" is not supported. fn normalize_interval_name(interval_str: &str) -> Option<String> { if interval_str.contains(char::is_whitespace) {
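A closing note on the biggest mechanical change (the object-store Prometheus layer, grpc/greptime_handler.rs, metrics/jemalloc.rs): every `map_err(|e| { side_effect; e })` becomes `Result::inspect_err`, and the jemalloc `Option` case becomes `Option::inspect`. Both are stable since Rust 1.76; they hand the closure a reference purely for the side effect and return `self` untouched, so the trailing `e` can no longer be forgotten or mangled. A minimal sketch with a stand-in error counter:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Stand-in for the Prometheus counter behind increment_errors_total().
static ERRORS_TOTAL: AtomicU64 = AtomicU64::new(0);

fn read_file(path: &str) -> std::io::Result<Vec<u8>> {
    // Before: .map_err(|e| { ERRORS_TOTAL.fetch_add(1, ...); e })
    // After: inspect_err gets `&std::io::Error`, records the metric,
    // and passes the Result through unchanged.
    std::fs::read(path).inspect_err(|_e| {
        ERRORS_TOTAL.fetch_add(1, Ordering::Relaxed);
    })
}

fn main() {
    let _ = read_file("/no/such/file");
    println!("errors recorded: {}", ERRORS_TOTAL.load(Ordering::Relaxed));
}
```

(Unrelated to this refactor but visible in the hunks above: the metric wrapper's `abort` still delegates to `self.inner.close()`, unchanged from before.)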
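And on the one-line Cargo.toml addition: Rust 1.80 turned on cfg-name checking (the `unexpected_cfgs` lint), and tokio's `--cfg tokio_unstable` is exactly the kind of externally-set cfg the compiler can't know about, so the workspace lints table registers it via `check-cfg`. For reference, the equivalent build-script form (a sketch; the diff uses the workspace `[lints]` table instead):

```rust
// build.rs -- declares a custom cfg to the 1.80+ checker so that
// `#[cfg(tokio_unstable)]` no longer trips the `unexpected_cfgs` lint.
fn main() {
    println!("cargo::rustc-check-cfg=cfg(tokio_unstable)");
}
```

Either way, code behind the cfg still only compiles when built with `RUSTFLAGS="--cfg tokio_unstable"`.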