chore: upgrade toolchain to nightly-2024-08-07 (#4549)
* chore: upgrade toolchain to `nightly-2024-08-07`

* chore(ci): upgrade toolchain

* fix: fix unit test
WenyXu authored Aug 22, 2024
1 parent 3517c13 commit 25cd61b
Showing 44 changed files with 90 additions and 108 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/apidoc.yml
@@ -13,7 +13,7 @@ on:
name: Build API docs

env:
-RUST_TOOLCHAIN: nightly-2024-04-20
+RUST_TOOLCHAIN: nightly-2024-08-07

jobs:
apidoc:
2 changes: 1 addition & 1 deletion .github/workflows/develop.yml
@@ -30,7 +30,7 @@ concurrency:
cancel-in-progress: true

env:
-RUST_TOOLCHAIN: nightly-2024-04-20
+RUST_TOOLCHAIN: nightly-2024-08-07

jobs:
check-typos-and-docs:
2 changes: 1 addition & 1 deletion .github/workflows/nightly-ci.yml
@@ -10,7 +10,7 @@ concurrency:
cancel-in-progress: true

env:
-RUST_TOOLCHAIN: nightly-2024-04-20
+RUST_TOOLCHAIN: nightly-2024-08-07

permissions:
issues: write
2 changes: 1 addition & 1 deletion .github/workflows/release.yml
@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
-RUST_TOOLCHAIN: nightly-2024-04-20
+RUST_TOOLCHAIN: nightly-2024-08-07
CARGO_PROFILE: nightly

# Controls whether to run tests, include unit-test, integration-test and sqlness.
4 changes: 2 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -77,6 +77,7 @@ clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
+rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

[workspace.dependencies]
# We turn off default-features for some dependencies here so the workspaces which inherit them can
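The new `rust.unexpected_cfgs` lint entry declares `cfg(tokio_unstable)` as a known custom cfg, so the stricter check-cfg validation in newer nightlies warns instead of rejecting code gated on it. A minimal sketch of the kind of code this covers (the function and its body are hypothetical, not from this repository):

```rust
// With `check-cfg = ['cfg(tokio_unstable)']` declared in Cargo.toml, rustc
// recognizes this cfg instead of flagging it via `unexpected_cfgs`.
// The cfg itself is enabled with: RUSTFLAGS="--cfg tokio_unstable" cargo build
#[cfg(tokio_unstable)]
fn init_runtime_metrics() {
    // tokio's unstable runtime metrics would be wired up here
}

#[cfg(not(tokio_unstable))]
fn init_runtime_metrics() {
    // no-op on stable tokio builds
}
```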
3 changes: 2 additions & 1 deletion rust-toolchain.toml
@@ -1,2 +1,3 @@
[toolchain]
-channel = "nightly-2024-04-20"
+channel = "nightly-2024-08-07"

2 changes: 1 addition & 1 deletion src/client/src/database.rs
@@ -91,7 +91,7 @@ impl Database {
///
/// - the name of database when using GreptimeDB standalone or cluster
/// - the name provided by GreptimeCloud or other multi-tenant GreptimeDB
-/// environment
+///   environment
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
Self {
catalog: String::default(),
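For context, a sketch of how the constructor documented above might be called; the database name is illustrative and `client` is assumed to be an already-configured `Client`:

```rust
// `dbname` is either a plain database name (standalone/cluster) or the
// name issued by a multi-tenant environment such as GreptimeCloud.
let database = Database::new_with_dbname("my_database", client);
```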
2 changes: 1 addition & 1 deletion src/cmd/Cargo.toml
@@ -51,7 +51,7 @@ file-engine.workspace = true
flow.workspace = true
frontend = { workspace = true, default-features = false }
futures.workspace = true
-human-panic = "1.2.2"
+human-panic = "2.0"
lazy_static.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
11 changes: 4 additions & 7 deletions src/cmd/src/bin/greptime.rs
@@ -139,13 +139,10 @@ async fn start(cli: Command) -> Result<()> {
}

fn setup_human_panic() {
-    let metadata = human_panic::Metadata {
-        version: env!("CARGO_PKG_VERSION").into(),
-        name: "GreptimeDB".into(),
-        authors: Default::default(),
-        homepage: "https://github.com/GreptimeTeam/greptimedb/discussions".into(),
-    };
-    human_panic::setup_panic!(metadata);
+    human_panic::setup_panic!(
+        human_panic::Metadata::new("GreptimeDB", env!("CARGO_PKG_VERSION"))
+            .homepage("https://github.com/GreptimeTeam/greptimedb/discussions")
+    );

common_telemetry::set_panic_hook();
}
8 changes: 4 additions & 4 deletions src/common/catalog/src/lib.rs
@@ -48,19 +48,19 @@ pub fn build_db_string(catalog: &str, schema: &str) -> String {
/// The database name may come from different sources:
///
/// - MySQL `schema` name in MySQL protocol login request: it's optional and user
-/// and switch database using `USE` command
+///   and switch database using `USE` command
/// - Postgres `database` parameter in Postgres wire protocol, required
/// - HTTP RESTful API: the database parameter, optional
/// - gRPC: the dbname field in header, optional but has a higher priority than
-/// original catalog/schema
+///   original catalog/schema
///
/// When database name is provided, we attempt to parse catalog and schema from
/// it. We assume the format `[<catalog>-]<schema>`:
///
/// - If `[<catalog>-]` part is not provided, we use whole database name as
-/// schema name
+///   schema name
/// - if `[<catalog>-]` is provided, we split database name with `-` and use
-/// `<catalog>` and `<schema>`.
+///   `<catalog>` and `<schema>`.
pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (String, String) {
match parse_optional_catalog_and_schema_from_db_string(db) {
(Some(catalog), schema) => (catalog, schema),
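A sketch of the `[<catalog>-]<schema>` rule described above; the expected results are inferred from the doc comment rather than verified against the tests:

```rust
// "greptime-public" contains a `-`, so it splits into catalog and schema.
let (catalog, schema) = parse_catalog_and_schema_from_db_string("greptime-public");
assert_eq!((catalog.as_str(), schema.as_str()), ("greptime", "public"));

// "public" has no catalog part: the whole name becomes the schema and the
// catalog falls back to the default.
let (_default_catalog, schema) = parse_catalog_and_schema_from_db_string("public");
assert_eq!(schema, "public");
```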
2 changes: 1 addition & 1 deletion src/common/function/src/function.rs
@@ -32,7 +32,7 @@ pub struct FunctionContext {

impl FunctionContext {
/// Create a mock [`FunctionContext`] for test.
-#[cfg(any(test, feature = "testing"))]
+#[cfg(test)]
pub fn mock() -> Self {
Self {
query_ctx: QueryContextBuilder::default().build().into(),
2 changes: 1 addition & 1 deletion src/common/function/src/scalars/aggregate/percentile.rs
@@ -75,7 +75,7 @@ where
// to keep the not_greater length == floor+1
// so to ensure the peek of the not_greater is array[floor]
// and the peek of the greater is array[floor+1]
-let p = if let Some(p) = self.p { p } else { 0.0_f64 };
+let p = self.p.unwrap_or(0.0_f64);
let floor = (((self.n - 1) as f64) * p / (100_f64)).floor();
if value <= *self.not_greater.peek().unwrap() {
self.not_greater.push(value);
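The surrounding comments describe the two-heap invariant this aggregator maintains. A self-contained sketch of that bookkeeping, using plain `i64` values in place of the generic ordered types the real implementation works with:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

/// Illustrative two-heap percentile state (simplified, not the real struct).
struct PercentileSketch {
    not_greater: BinaryHeap<i64>,      // max-heap of the smallest floor+1 values
    greater: BinaryHeap<Reverse<i64>>, // min-heap of everything above them
    p: f64,                            // percentile in [0, 100]
    n: usize,                          // number of values seen
}

impl PercentileSketch {
    fn push(&mut self, value: i64) {
        self.n += 1;
        // As in the comment above: keep not_greater.len() == floor + 1, so its
        // peek is array[floor] and greater's peek is array[floor + 1].
        let floor = (((self.n - 1) as f64) * self.p / 100.0).floor() as usize;
        if self.not_greater.peek().map_or(true, |top| value <= *top) {
            self.not_greater.push(value);
        } else {
            self.greater.push(Reverse(value));
        }
        while self.not_greater.len() > floor + 1 {
            if let Some(v) = self.not_greater.pop() {
                self.greater.push(Reverse(v));
            }
        }
        while self.not_greater.len() < floor + 1 {
            if let Some(Reverse(v)) = self.greater.pop() {
                self.not_greater.push(v);
            } else {
                break; // nothing left to move
            }
        }
    }
}
```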
@@ -245,7 +245,7 @@ mod test {
];
scipy_stats_norm_pdf.update_batch(&v).unwrap();
assert_eq!(
-Value::from(0.17843340219081558),
+Value::from(0.17843340219081552),
scipy_stats_norm_pdf.evaluate().unwrap()
);

2 changes: 1 addition & 1 deletion src/common/function/src/state.rs
@@ -28,7 +28,7 @@ pub struct FunctionState {

impl FunctionState {
/// Create a mock [`FunctionState`] for test.
-#[cfg(any(test, feature = "testing"))]
+#[cfg(test)]
pub fn mock() -> Self {
use std::sync::Arc;

1 change: 1 addition & 0 deletions src/common/macro/src/lib.rs
@@ -76,6 +76,7 @@ pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
/// - `&ProcedureServiceHandlerRef` or `&TableMutationHandlerRef` or `FlowServiceHandlerRef` as the first argument,
/// - `&QueryContextRef` as the second argument, and
/// - `&[ValueRef<'_>]` as the third argument which is SQL function input values in each row.
+///
/// Return type must be `common_query::error::Result<Value>`.
///
/// # Example see `common/function/src/system/procedure_state.rs`.
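The doc comment above spells out the signature contract enforced by this attribute macro. A hypothetical function shaped to that contract, for illustration only (the function name, the handler it picks, and the surrounding attribute usage are assumptions, not code from this repository):

```rust
// First argument: one of the handler refs listed above; then the query
// context; then the row's input values. Return type per the doc comment.
async fn flush_table(
    table_mutation_handler: &TableMutationHandlerRef,
    query_ctx: &QueryContextRef,
    params: &[ValueRef<'_>],
) -> common_query::error::Result<Value> {
    // validate `params`, dispatch to the handler, wrap the outcome in a Value
    todo!()
}
```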
4 changes: 2 additions & 2 deletions src/common/meta/src/lock_key.rs
@@ -172,8 +172,8 @@ impl From<TableLock> for StringKey {
///
/// Note:
/// - Allows modification the corresponding region's [TableRouteValue](crate::key::table_route::TableRouteValue),
-/// [TableDatanodeValue](crate::key::datanode_table::DatanodeTableValue) even if
-/// it acquires the [RegionLock::Write] only without acquiring the [TableLock::Write].
+///   [TableDatanodeValue](crate::key::datanode_table::DatanodeTableValue) even if
+///   it acquires the [RegionLock::Write] only without acquiring the [TableLock::Write].
///
/// - Should acquire [TableLock] of the table at same procedure.
///
2 changes: 1 addition & 1 deletion src/common/procedure/src/local.rs
@@ -51,7 +51,7 @@ const META_TTL: Duration = Duration::from_secs(60 * 10);
/// [Notify] is not a condition variable, we can't guarantee the waiters are notified
/// if they didn't call `notified()` before we signal the notify. So we
/// 1. use dedicated notify for each condition, such as waiting for a lock, waiting
-/// for children;
+///    for children;
/// 2. always use `notify_one` and ensure there are only one waiter.
#[derive(Debug)]
pub(crate) struct ProcedureMeta {
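The ProcedureMeta comment above relies on a specific `tokio::sync::Notify` pattern: one dedicated `Notify` per condition, each with a single waiter woken via `notify_one`. A simplified sketch (field and function names are illustrative, not the actual ProcedureMeta layout):

```rust
use std::sync::Arc;
use tokio::sync::Notify;

// One dedicated Notify per condition, so a stored permit is unambiguous.
struct Conditions {
    lock_granted: Notify,
    children_done: Notify,
}

async fn wait_for_children(cond: Arc<Conditions>) {
    // Notify stores a permit if notify_one() ran before we got here, so the
    // sole waiter cannot miss the signal even without calling notified() first.
    cond.children_done.notified().await;
}

fn on_all_children_finished(cond: &Conditions) {
    // Exactly one task ever waits on this condition, so notify_one suffices.
    cond.children_done.notify_one();
}
```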
2 changes: 0 additions & 2 deletions src/common/test-util/src/lib.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#![feature(lazy_cell)]
-
use std::path::{Path, PathBuf};
use std::process::Command;
use std::sync::LazyLock;
1 change: 0 additions & 1 deletion src/flow/src/lib.rs
@@ -17,7 +17,6 @@
//! It also contains definition of expression, adapter and plan, and internal state management.
#![feature(let_chains)]
-#![feature(duration_abs_diff)]
#![allow(dead_code)]
#![warn(clippy::missing_docs_in_private_items)]
#![warn(clippy::too_many_lines)]
3 changes: 2 additions & 1 deletion src/log-store/src/kafka/util/record.rs
@@ -40,8 +40,9 @@ pub(crate) const ESTIMATED_META_SIZE: usize = 256;
/// - If the entry is able to fit into a Kafka record, it's converted into a Full record.
///
/// - If the entry is too large to fit into a Kafka record, it's converted into a collection of records.
+///
/// Those records must contain exactly one First record and one Last record, and potentially several
-/// Middle records. There may be no Middle record.
+///   Middle records. There may be no Middle record.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum RecordType {
/// The record is self-contained, i.e. an entry's data is fully stored into this record.
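The First/Middle/Last layout described above can be made concrete with a small sketch. The enum here is a stand-in, since the real `RecordType` in this file may carry extra data per variant:

```rust
/// Stand-in record kinds; models only the splitting rule from the doc above.
#[derive(Debug, PartialEq)]
enum Kind {
    Full,
    First,
    Middle,
    Last,
}

fn record_kinds(num_chunks: usize) -> Vec<Kind> {
    match num_chunks {
        0 => vec![],
        1 => vec![Kind::Full], // entry fits into a single Kafka record
        n => {
            let mut kinds = vec![Kind::First];
            // zero or more Middle records between exactly one First and one Last
            kinds.extend((0..n - 2).map(|_| Kind::Middle));
            kinds.push(Kind::Last);
            kinds
        }
    }
}

// record_kinds(2) == [First, Last]: no Middle record, as the doc allows.
```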
1 change: 1 addition & 0 deletions src/log-store/src/raft_engine.rs
@@ -23,6 +23,7 @@ use store_api::logstore::entry::{Entry, NaiveEntry};
use store_api::logstore::provider::Provider;
use store_api::storage::RegionId;

+#[allow(renamed_and_removed_lints)]
pub mod protos {
include!(concat!(env!("OUT_DIR"), concat!("/", "protos/", "mod.rs")));
}
1 change: 0 additions & 1 deletion src/meta-srv/src/lib.rs
@@ -15,7 +15,6 @@
#![feature(async_closure)]
#![feature(result_flattening)]
#![feature(assert_matches)]
-#![feature(option_take_if)]
#![feature(extract_if)]

pub mod bootstrap;
@@ -33,8 +33,8 @@ impl UpdateMetadata {
/// About the failure of updating the [TableRouteValue](common_meta::key::table_region::TableRegionValue):
///
/// - There may be another [RegionMigrationProcedure](crate::procedure::region_migration::RegionMigrationProcedure)
-/// that is executed concurrently for **other region**.
-/// It will only update **other region** info. Therefore, It's safe to retry after failure.
+///   that is executed concurrently for **other region**.
+///   It will only update **other region** info. Therefore, It's safe to retry after failure.
///
/// - There is no other DDL procedure executed concurrently for the current table.
pub async fn downgrade_leader_region(&self, ctx: &mut Context) -> Result<()> {
@@ -122,7 +122,7 @@ impl UpdateMetadata {
///
/// Abort(non-retry):
/// - TableRoute or RegionRoute is not found.
-/// Typically, it's impossible, there is no other DDL procedure executed concurrently for the current table.
+///   Typically, it's impossible, there is no other DDL procedure executed concurrently for the current table.
///
/// Retry:
/// - Failed to update [TableRouteValue](common_meta::key::table_region::TableRegionValue).
1 change: 1 addition & 0 deletions src/meta-srv/src/selector/weighted_choose.rs
@@ -27,6 +27,7 @@ pub trait WeightedChoose<Item>: Send + Sync {
/// Note:
/// 1. make sure weight_array is not empty.
/// 2. the total weight is greater than 0.
+///
/// Otherwise an error will be returned.
fn set_weight_array(&mut self, weight_array: Vec<WeightedItem<Item>>) -> Result<()>;

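The two constraints above (a non-empty weight array and a positive total weight) mirror the failure modes of `rand`'s `WeightedIndex`, which makes for a compact sketch; this is illustrative only, not the trait's actual implementation:

```rust
use rand::distributions::{WeightedError, WeightedIndex};
use rand::prelude::*;

fn weighted_choose<T>(items: &[(T, u32)]) -> Result<&T, WeightedError> {
    // WeightedIndex::new fails with NoItem for an empty array and with
    // AllWeightsZero when the total weight is 0 -- the same two constraints.
    let dist = WeightedIndex::new(items.iter().map(|(_, w)| *w))?;
    Ok(&items[dist.sample(&mut thread_rng())].0)
}
```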
4 changes: 2 additions & 2 deletions src/meta-srv/src/service/store/cached_kv.rs
@@ -61,9 +61,9 @@ impl CheckLeader for RwLock<State> {
/// To use this cache, the following constraints must be followed:
/// 1. The leader node can create this metadata.
/// 2. The follower node can create this metadata. The leader node can lazily retrieve
-/// the corresponding data through the caching loading mechanism.
+///    the corresponding data through the caching loading mechanism.
/// 3. Only the leader node can update this metadata, as the cache cannot detect
-/// modifications made to the data on the follower node.
+///    modifications made to the data on the follower node.
/// 4. Only the leader node can delete this metadata for the same reason mentioned above.
pub struct LeaderCachedKvBackend {
check_leader: CheckLeaderRef,
2 changes: 1 addition & 1 deletion src/mito2/src/read/merge.rs
@@ -31,7 +31,7 @@ use crate::read::{Batch, BatchReader, BoxedBatchReader, Source};
///
/// The merge reader merges [Batch]es from multiple sources that yield sorted batches.
/// 1. Batch is ordered by primary key, time index, sequence desc, op type desc (we can
-/// ignore op type as sequence is already unique).
+///    ignore op type as sequence is already unique).
/// 2. Batches from sources **must** not be empty.
///
/// The reader won't concatenate batches. Each batch returned by the reader also doesn't
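The ordering contract in this doc comment translates directly into a comparator. A sketch over a simplified key tuple (the actual `Batch` layout differs):

```rust
use std::cmp::Ordering;

// Key: (primary key, time index, sequence) -- ascending, ascending, descending.
fn compare_batches(a: &(Vec<u8>, i64, u64), b: &(Vec<u8>, i64, u64)) -> Ordering {
    a.0.cmp(&b.0)                    // primary key, ascending
        .then_with(|| a.1.cmp(&b.1)) // time index, ascending
        .then_with(|| b.2.cmp(&a.2)) // sequence, descending (operands swapped)
}
```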
1 change: 1 addition & 0 deletions src/mito2/src/test_util.rs
@@ -960,6 +960,7 @@ pub fn build_rows(start: usize, end: usize) -> Vec<Row> {
/// - `key`: A string key that is common across all rows.
/// - `timestamps`: Array of timestamp values.
/// - `fields`: Array of tuples where each tuple contains two optional i64 values, representing two optional float fields.
+///
/// Returns a vector of `Row` each containing the key, two optional float fields, and a timestamp.
pub fn build_rows_with_fields(
key: &str,