Skip to content

Commit

Permalink
Encode whether or not a chunk is sorted by RowId in the column metadata
Browse files Browse the repository at this point in the history
* Part of #8744

It used to be encoded in the `RecordBatch` metadata.
It makes more sense to encode it on the column metadata though,
and brings the RowId column closer to the time columns
(I hope to unify the two in the near future).
  • Loading branch information
emilk committed Feb 14, 2025
1 parent 7995dd4 commit fb6eaf3
Show file tree
Hide file tree
Showing 5 changed files with 49 additions and 78 deletions.
27 changes: 11 additions & 16 deletions crates/store/re_chunk/src/transport.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@ use nohash_hasher::IntMap;

use re_arrow_util::{into_arrow_ref, ArrowArrayDowncastRef as _};
use re_byte_size::SizeBytes as _;
use re_types_core::{arrow_helpers::as_array_ref, ComponentDescriptor, Loggable as _};
use re_types_core::{arrow_helpers::as_array_ref, ComponentDescriptor};

use crate::{chunk::ChunkComponents, Chunk, ChunkError, ChunkResult, RowId, TimeColumn};
use crate::{chunk::ChunkComponents, Chunk, ChunkError, ChunkResult, TimeColumn};

// ---

Expand Down Expand Up @@ -45,7 +45,9 @@ impl Chunk {
components,
} = self;

let row_id_schema = re_sorbet::RowIdColumnDescriptor::try_from(RowId::arrow_datatype())?;
let row_id_schema = re_sorbet::RowIdColumnDescriptor {
is_sorted: *is_sorted,
};

let (index_schemas, index_arrays): (Vec<_>, Vec<_>) = {
re_tracing::profile_scope!("timelines");
Expand Down Expand Up @@ -124,8 +126,7 @@ impl Chunk {
index_schemas,
data_schemas,
)
.with_heap_size_bytes(heap_size_bytes)
.with_sorted(*is_sorted);
.with_heap_size_bytes(heap_size_bytes);

Ok(re_sorbet::ChunkBatch::try_new(
schema,
Expand All @@ -151,13 +152,6 @@ impl Chunk {
batch.num_rows()
));

// Metadata
let (id, entity_path, is_sorted) = (
batch.chunk_id(),
batch.entity_path().clone(),
batch.is_sorted(),
);

let row_ids = batch.row_id_column().1.clone();

let timelines = {
Expand Down Expand Up @@ -224,10 +218,11 @@ impl Chunk {
components
};

let is_sorted_by_row_id = batch.chunk_schema().row_id_column().is_sorted;
let mut res = Self::new(
id,
entity_path,
is_sorted.then_some(true),
batch.chunk_id(),
batch.entity_path().clone(),
is_sorted_by_row_id.then_some(true),
row_ids,
timelines,
components,
Expand Down Expand Up @@ -277,7 +272,7 @@ mod tests {
example_components::{MyColor, MyPoint},
EntityPath, Timeline,
};
use re_types_core::{ChunkId, Component as _};
use re_types_core::{ChunkId, Component as _, Loggable as _, RowId};

use super::*;

Expand Down
13 changes: 0 additions & 13 deletions crates/store/re_sorbet/src/chunk_schema.rs
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,6 @@ impl ChunkSchema {
chunk_id: Some(chunk_id),
entity_path: Some(entity_path.clone()),
heap_size_bytes: None,
is_sorted: false, // assume the worst
},
row_id,
chunk_id,
Expand All @@ -73,12 +72,6 @@ impl ChunkSchema {
self.sorbet.heap_size_bytes = Some(heap_size_bytes);
self
}

#[inline]
pub fn with_sorted(mut self, sorted: bool) -> Self {
self.sorbet.is_sorted = sorted;
self
}
}

/// ## Accessors
Expand All @@ -101,12 +94,6 @@ impl ChunkSchema {
self.sorbet.heap_size_bytes
}

/// Are we sorted by the row id column?
#[inline]
pub fn is_sorted(&self) -> bool {
self.sorbet.is_sorted
}

/// Total number of columns in this chunk,
/// including the row id column, the index columns,
/// and the data columns.
Expand Down
66 changes: 37 additions & 29 deletions crates/store/re_sorbet/src/row_id_column_descriptor.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
use arrow::datatypes::{DataType as ArrowDatatype, Field as ArrowField};
use re_types_core::{Component as _, Loggable as _, RowId};

use crate::MetadataExt as _;

#[derive(thiserror::Error, Debug)]
#[error("Wrong datatype. Expected {expected:?}, got {actual:?}")]
pub struct WrongDatatypeError {
Expand All @@ -25,30 +27,32 @@ impl WrongDatatypeError {
}

/// Describes the schema of the primary [`RowId`] column.
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct RowIdColumnDescriptor {}
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct RowIdColumnDescriptor {
/// Are the values in this column sorted?
pub is_sorted: bool,
}

impl RowIdColumnDescriptor {
#[inline]
pub fn new() -> Self {
Self {}
pub fn from_sorted(is_sorted: bool) -> Self {
Self { is_sorted }
}

#[inline]
pub fn to_arrow_field(&self) -> ArrowField {
let Self {} = self;
let Self { is_sorted } = self;

let metadata = [
Some(("rerun.kind".to_owned(), "control".to_owned())),
// This ensures the RowId/Tuid is formatted correctly:
Some((
let mut metadata = std::collections::HashMap::from([
("rerun.kind".to_owned(), "index".to_owned()),
(
"ARROW:extension:name".to_owned(),
re_tuid::Tuid::ARROW_EXTENSION_NAME.to_owned(),
)),
]
.into_iter()
.flatten()
.collect();
),
]);
if *is_sorted {
metadata.insert("rerun.is_sorted".to_owned(), "true".to_owned());
}

let nullable = false; // All rows has an id
ArrowField::new(
Expand All @@ -69,24 +73,28 @@ impl TryFrom<&ArrowField> for RowIdColumnDescriptor {
type Error = WrongDatatypeError;

fn try_from(field: &ArrowField) -> Result<Self, Self::Error> {
Self::try_from(field.data_type())
// Self::try_from(field.data_type())
WrongDatatypeError::compare_expected_actual(&RowId::arrow_datatype(), field.data_type())?;
Ok(Self {
is_sorted: field.metadata().get_bool("rerun.is_sorted"),
})
}
}

impl TryFrom<&ArrowDatatype> for RowIdColumnDescriptor {
type Error = WrongDatatypeError;
// impl TryFrom<&ArrowDatatype> for RowIdColumnDescriptor {
// type Error = WrongDatatypeError;

fn try_from(data_type: &ArrowDatatype) -> Result<Self, Self::Error> {
WrongDatatypeError::compare_expected_actual(&RowId::arrow_datatype(), data_type)?;
Ok(Self {})
}
}
// fn try_from(data_type: &ArrowDatatype) -> Result<Self, Self::Error> {
// WrongDatatypeError::compare_expected_actual(&RowId::arrow_datatype(), data_type)?;
// Ok(Self {})
// }
// }

impl TryFrom<ArrowDatatype> for RowIdColumnDescriptor {
type Error = WrongDatatypeError;
// impl TryFrom<ArrowDatatype> for RowIdColumnDescriptor {
// type Error = WrongDatatypeError;

fn try_from(data_type: ArrowDatatype) -> Result<Self, Self::Error> {
WrongDatatypeError::compare_expected_actual(&RowId::arrow_datatype(), &data_type)?;
Ok(Self {})
}
}
// fn try_from(data_type: ArrowDatatype) -> Result<Self, Self::Error> {
// WrongDatatypeError::compare_expected_actual(&RowId::arrow_datatype(), &data_type)?;
// Ok(Self {})
// }
// }
6 changes: 0 additions & 6 deletions crates/store/re_sorbet/src/sorbet_batch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,12 +54,6 @@ impl SorbetBatch {
self.schema.heap_size_bytes
}

/// Are we sorted by the row id column?
#[inline]
pub fn is_sorted(&self) -> bool {
self.schema.is_sorted
}

#[inline]
pub fn fields(&self) -> &ArrowFields {
&self.schema_ref().fields
Expand Down
15 changes: 1 addition & 14 deletions crates/store/re_sorbet/src/sorbet_schema.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use arrow::datatypes::Schema as ArrowSchema;
use re_log_types::EntityPath;
use re_types_core::ChunkId;

use crate::{ArrowBatchMetadata, MetadataExt as _, SorbetColumnDescriptors, SorbetError};
use crate::{ArrowBatchMetadata, SorbetColumnDescriptors, SorbetError};

// ----------------------------------------------------------------------------

Expand All @@ -24,9 +24,6 @@ pub struct SorbetSchema {

/// The heap size of this batch in bytes, if known.
pub heap_size_bytes: Option<u64>,

/// Are we sorted by the row id column?
pub is_sorted: bool, // TODO(emilk): move to `RowIdColumnDescriptor`.
}

/// ## Metadata keys for the record batch metadata
Expand All @@ -45,12 +42,6 @@ impl SorbetSchema {
self
}

#[inline]
pub fn with_sorted(mut self, sorted_by_row_id: bool) -> Self {
self.is_sorted = sorted_by_row_id;
self
}

pub fn chunk_id_metadata(chunk_id: &ChunkId) -> (String, String) {
("rerun.id".to_owned(), format!("{:X}", chunk_id.as_u128()))
}
Expand All @@ -65,7 +56,6 @@ impl SorbetSchema {
chunk_id,
entity_path,
heap_size_bytes,
is_sorted,
} = self;

[
Expand All @@ -81,7 +71,6 @@ impl SorbetSchema {
heap_size_bytes.to_string(),
)
}),
is_sorted.then(|| ("rerun.is_sorted".to_owned(), "true".to_owned())),
]
.into_iter()
.flatten()
Expand Down Expand Up @@ -127,7 +116,6 @@ impl TryFrom<&ArrowSchema> for SorbetSchema {
None
};

let sorted_by_row_id = metadata.get_bool("rerun.is_sorted");
let heap_size_bytes = if let Some(heap_size_bytes) = metadata.get("rerun.heap_size_bytes") {
heap_size_bytes
.parse()
Expand Down Expand Up @@ -156,7 +144,6 @@ impl TryFrom<&ArrowSchema> for SorbetSchema {
chunk_id,
entity_path,
heap_size_bytes,
is_sorted: sorted_by_row_id,
})
}
}

0 comments on commit fb6eaf3

Please sign in to comment.