Skip to content

Commit

Permalink
Remove APIs deprecated on or before 49.0.0
Browse files Browse the repository at this point in the history
The 49 release was on Nov 9, 2023. Remove the APIs that were deprecated
in that release or earlier.

A few such deprecated APIs remain in the code; they are still in use, so
the code needs to be updated.
  • Loading branch information
findepi committed Nov 25, 2024
1 parent 73a0c26 commit 77ae7f2
Show file tree
Hide file tree
Showing 19 changed files with 6 additions and 270 deletions.
16 changes: 1 addition & 15 deletions arrow-arith/src/temporal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ use std::sync::Arc;

use arrow_array::cast::AsArray;
use cast::as_primitive_array;
use chrono::{Datelike, NaiveDateTime, Offset, TimeZone, Timelike, Utc};
use chrono::{Datelike, TimeZone, Timelike, Utc};

use arrow_array::temporal_conversions::{
date32_to_datetime, date64_to_datetime, timestamp_ms_to_datetime, timestamp_ns_to_datetime,
Expand Down Expand Up @@ -664,20 +664,6 @@ impl<T: Datelike> ChronoDateExt for T {
}
}

/// Parse the given string into a string representing fixed-offset that is correct as of the given
/// UTC NaiveDateTime.
///
/// Note that the offset is function of time and can vary depending on whether daylight savings is
/// in effect or not. e.g. Australia/Sydney is +10:00 or +11:00 depending on DST.
#[deprecated(since = "26.0.0", note = "Use arrow_array::timezone::Tz instead")]
pub fn using_chrono_tz_and_utc_naive_date_time(
tz: &str,
utc: NaiveDateTime,
) -> Option<chrono::offset::FixedOffset> {
let tz: Tz = tz.parse().ok()?;
Some(tz.offset_from_utc_datetime(&utc).fix())
}

/// Extracts the hours of a given array as an array of integers within
/// the range of [0, 23]. If the given array isn't temporal primitive or dictionary array,
/// an `Err` will be returned.
Expand Down
6 changes: 0 additions & 6 deletions arrow-array/src/array/binary_array.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,6 @@ use arrow_schema::DataType;
pub type GenericBinaryArray<OffsetSize> = GenericByteArray<GenericBinaryType<OffsetSize>>;

impl<OffsetSize: OffsetSizeTrait> GenericBinaryArray<OffsetSize> {
/// Get the data type of the array.
#[deprecated(since = "20.0.0", note = "please use `Self::DATA_TYPE` instead")]
pub const fn get_data_type() -> DataType {
Self::DATA_TYPE
}

/// Creates a [GenericBinaryArray] from a vector of byte slices
///
/// See also [`Self::from_iter_values`]
Expand Down
18 changes: 0 additions & 18 deletions arrow-array/src/array/primitive_array.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1480,24 +1480,6 @@ def_numeric_from_vec!(TimestampMicrosecondType);
def_numeric_from_vec!(TimestampNanosecondType);

impl<T: ArrowTimestampType> PrimitiveArray<T> {
/// Construct a timestamp array from a vec of i64 values and an optional timezone
#[deprecated(since = "26.0.0", note = "Use with_timezone_opt instead")]
pub fn from_vec(data: Vec<i64>, timezone: Option<String>) -> Self
where
Self: From<Vec<i64>>,
{
Self::from(data).with_timezone_opt(timezone)
}

/// Construct a timestamp array from a vec of `Option<i64>` values and an optional timezone
#[deprecated(since = "26.0.0", note = "Use with_timezone_opt instead")]
pub fn from_opt_vec(data: Vec<Option<i64>>, timezone: Option<String>) -> Self
where
Self: From<Vec<Option<i64>>>,
{
Self::from(data).with_timezone_opt(timezone)
}

/// Returns the timezone of this array if any
pub fn timezone(&self) -> Option<&str> {
match self.data_type() {
Expand Down
8 changes: 1 addition & 7 deletions arrow-array/src/array/string_array.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,18 +17,12 @@

use crate::types::GenericStringType;
use crate::{GenericBinaryArray, GenericByteArray, GenericListArray, OffsetSizeTrait};
use arrow_schema::{ArrowError, DataType};
use arrow_schema::ArrowError;

/// A [`GenericByteArray`] for storing `str`
pub type GenericStringArray<OffsetSize> = GenericByteArray<GenericStringType<OffsetSize>>;

impl<OffsetSize: OffsetSizeTrait> GenericStringArray<OffsetSize> {
/// Get the data type of the array.
#[deprecated(since = "20.0.0", note = "please use `Self::DATA_TYPE` instead")]
pub const fn get_data_type() -> DataType {
Self::DATA_TYPE
}

/// Returns the number of `Unicode Scalar Value` in the string at index `i`.
/// # Performance
/// This function has `O(n)` time complexity where `n` is the string length.
Expand Down
9 changes: 0 additions & 9 deletions arrow-array/src/cast.rs
Original file line number Diff line number Diff line change
Expand Up @@ -689,15 +689,6 @@ array_downcast_fn!(as_struct_array, StructArray);
array_downcast_fn!(as_union_array, UnionArray);
array_downcast_fn!(as_map_array, MapArray);

/// Force downcast of an Array, such as an ArrayRef to Decimal128Array, panic’ing on failure.
#[deprecated(
since = "42.0.0",
note = "please use `as_primitive_array::<Decimal128Type>` instead"
)]
pub fn as_decimal_array(arr: &dyn Array) -> &PrimitiveArray<Decimal128Type> {
as_primitive_array::<Decimal128Type>(arr)
}

/// Downcasts a `dyn Array` to a concrete type
///
/// ```
Expand Down
6 changes: 0 additions & 6 deletions arrow-array/src/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -324,12 +324,6 @@ pub trait ArrowTimestampType: ArrowTemporalType<Native = i64> {
/// The [`TimeUnit`] of this timestamp.
const UNIT: TimeUnit;

/// Returns the `TimeUnit` of this timestamp.
#[deprecated(since = "36.0.0", note = "Use Self::UNIT")]
fn get_time_unit() -> TimeUnit {
Self::UNIT
}

/// Creates a ArrowTimestampType::Native from the provided [`NaiveDateTime`]
///
/// See [`DataType::Timestamp`] for more information on timezone handling
Expand Down
11 changes: 0 additions & 11 deletions arrow-buffer/src/buffer/boolean.rs
Original file line number Diff line number Diff line change
Expand Up @@ -96,17 +96,6 @@ impl BooleanBuffer {
BitChunks::new(self.values(), self.offset, self.len)
}

/// Returns `true` if the bit at index `i` is set
///
/// # Panics
///
/// Panics if `i >= self.len()`
#[inline]
#[deprecated(since = "36.0.0", note = "use BooleanBuffer::value")]
pub fn is_set(&self, i: usize) -> bool {
self.value(i)
}

/// Returns the offset of this [`BooleanBuffer`] in bits
#[inline]
pub fn offset(&self) -> usize {
Expand Down
8 changes: 0 additions & 8 deletions arrow-buffer/src/buffer/immutable.rs
Original file line number Diff line number Diff line change
Expand Up @@ -278,14 +278,6 @@ impl Buffer {
BitChunks::new(self.as_slice(), offset, len)
}

/// Returns the number of 1-bits in this buffer.
#[deprecated(since = "27.0.0", note = "use count_set_bits_offset instead")]
pub fn count_set_bits(&self) -> usize {
let len_in_bits = self.len() * 8;
// self.offset is already taken into consideration by the bit_chunks implementation
self.count_set_bits_offset(0, len_in_bits)
}

/// Returns the number of 1-bits in this buffer, starting from `offset` with `length` bits
/// inspected. Note that both `offset` and `length` are measured in bits.
pub fn count_set_bits_offset(&self, offset: usize, len: usize) -> usize {
Expand Down
7 changes: 0 additions & 7 deletions arrow-buffer/src/buffer/mutable.rs
Original file line number Diff line number Diff line change
Expand Up @@ -118,13 +118,6 @@ impl MutableBuffer {
Self { data, len, layout }
}

/// Create a [`MutableBuffer`] from the provided [`Vec`] without copying
#[inline]
#[deprecated(since = "46.0.0", note = "Use From<Vec<T>>")]
pub fn from_vec<T: ArrowNativeType>(vec: Vec<T>) -> Self {
Self::from(vec)
}

/// Allocates a new [MutableBuffer] from given `Bytes`.
pub(crate) fn from_bytes(bytes: Bytes) -> Result<Self, Bytes> {
let layout = match bytes.deallocation() {
Expand Down
38 changes: 3 additions & 35 deletions arrow-buffer/src/native.rs
Original file line number Diff line number Diff line change
Expand Up @@ -88,30 +88,6 @@ pub trait ArrowNativeType:
/// Returns `None` if [`Self`] is not an integer or conversion would result
/// in truncation/overflow
fn to_i64(self) -> Option<i64>;

/// Convert native type from i32.
///
/// Returns `None` if [`Self`] is not `i32`
#[deprecated(since = "24.0.0", note = "please use `Option::Some` instead")]
fn from_i32(_: i32) -> Option<Self> {
None
}

/// Convert native type from i64.
///
/// Returns `None` if [`Self`] is not `i64`
#[deprecated(since = "24.0.0", note = "please use `Option::Some` instead")]
fn from_i64(_: i64) -> Option<Self> {
None
}

/// Convert native type from i128.
///
/// Returns `None` if [`Self`] is not `i128`
#[deprecated(since = "24.0.0", note = "please use `Option::Some` instead")]
fn from_i128(_: i128) -> Option<Self> {
None
}
}

macro_rules! native_integer {
Expand Down Expand Up @@ -147,23 +123,15 @@ macro_rules! native_integer {
fn usize_as(i: usize) -> Self {
i as _
}


$(
#[inline]
fn $from(v: $t) -> Option<Self> {
Some(v)
}
)*
}
};
}

native_integer!(i8);
native_integer!(i16);
native_integer!(i32, from_i32);
native_integer!(i64, from_i64);
native_integer!(i128, from_i128);
native_integer!(i32);
native_integer!(i64);
native_integer!(i128);
native_integer!(u8);
native_integer!(u16);
native_integer!(u32);
Expand Down
55 changes: 1 addition & 54 deletions arrow-csv/src/reader/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ use lazy_static::lazy_static;
use regex::{Regex, RegexSet};
use std::fmt::{self, Debug};
use std::fs::File;
use std::io::{BufRead, BufReader as StdBufReader, Read, Seek, SeekFrom};
use std::io::{BufRead, BufReader as StdBufReader, Read};
use std::sync::Arc;

use crate::map_csv_error;
Expand Down Expand Up @@ -399,51 +399,6 @@ impl Format {
}
}

/// Infer the schema of a CSV file by reading through the first n records of the file,
/// with `max_read_records` controlling the maximum number of records to read.
///
/// If `max_read_records` is not set, the whole file is read to infer its schema.
///
/// Return inferred schema and number of records used for inference. This function does not change
/// reader cursor offset.
///
/// The inferred schema will always have each field set as nullable.
#[deprecated(since = "39.0.0", note = "Use Format::infer_schema")]
#[allow(deprecated)]
pub fn infer_file_schema<R: Read + Seek>(
mut reader: R,
delimiter: u8,
max_read_records: Option<usize>,
has_header: bool,
) -> Result<(Schema, usize), ArrowError> {
let saved_offset = reader.stream_position()?;
let r = infer_reader_schema(&mut reader, delimiter, max_read_records, has_header)?;
// return the reader seek back to the start
reader.seek(SeekFrom::Start(saved_offset))?;
Ok(r)
}

/// Infer schema of CSV records provided by struct that implements `Read` trait.
///
/// `max_read_records` controlling the maximum number of records to read. If `max_read_records` is
/// not set, all records are read to infer the schema.
///
/// Return inferred schema and number of records used for inference.
#[deprecated(since = "39.0.0", note = "Use Format::infer_schema")]
pub fn infer_reader_schema<R: Read>(
reader: R,
delimiter: u8,
max_read_records: Option<usize>,
has_header: bool,
) -> Result<(Schema, usize), ArrowError> {
let format = Format {
delimiter: Some(delimiter),
header: has_header,
..Default::default()
};
format.infer_schema(reader, max_read_records)
}

/// Infer schema from a list of CSV files by reading through first n records
/// with `max_read_records` controlling the maximum number of records to read.
///
Expand Down Expand Up @@ -1101,14 +1056,6 @@ impl ReaderBuilder {
}
}

/// Set whether the CSV file has headers
#[deprecated(since = "39.0.0", note = "Use with_header")]
#[doc(hidden)]
pub fn has_header(mut self, has_header: bool) -> Self {
self.format.header = has_header;
self
}

/// Set whether the CSV file has a header
pub fn with_header(mut self, has_header: bool) -> Self {
self.format.header = has_header;
Expand Down
19 changes: 0 additions & 19 deletions arrow-csv/src/writer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -256,14 +256,6 @@ impl WriterBuilder {
Self::default()
}

/// Set whether to write headers
#[deprecated(since = "39.0.0", note = "Use Self::with_header")]
#[doc(hidden)]
pub fn has_headers(mut self, has_headers: bool) -> Self {
self.has_header = has_headers;
self
}

/// Set whether to write the CSV file with a header
pub fn with_header(mut self, header: bool) -> Self {
self.has_header = header;
Expand Down Expand Up @@ -397,17 +389,6 @@ impl WriterBuilder {
self.null_value.as_deref().unwrap_or(DEFAULT_NULL_VALUE)
}

/// Use RFC3339 format for date/time/timestamps (default)
#[deprecated(since = "39.0.0", note = "Use WriterBuilder::default()")]
pub fn with_rfc3339(mut self) -> Self {
self.date_format = None;
self.datetime_format = None;
self.time_format = None;
self.timestamp_format = None;
self.timestamp_tz_format = None;
self
}

/// Create a new `Writer`
pub fn build<W: Write>(self, writer: W) -> Writer<W> {
let mut builder = csv::WriterBuilder::new();
Expand Down
5 changes: 0 additions & 5 deletions arrow-data/src/data.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,11 +30,6 @@ use std::sync::Arc;

use crate::{equal, validate_binary_view, validate_string_view};

/// A collection of [`Buffer`]
#[doc(hidden)]
#[deprecated(since = "46.0.0", note = "Use [Buffer]")]
pub type Buffers<'a> = &'a [Buffer];

#[inline]
pub(crate) fn contains_nulls(
null_bit_buffer: Option<&NullBuffer>,
Expand Down
7 changes: 0 additions & 7 deletions arrow-json/src/reader/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -244,13 +244,6 @@ impl ReaderBuilder {
Self { batch_size, ..self }
}

/// Sets if the decoder should coerce primitive values (bool and number) into string
/// when the Schema's column is Utf8 or LargeUtf8.
#[deprecated(since = "39.0.0", note = "Use with_coerce_primitive")]
pub fn coerce_primitive(self, coerce_primitive: bool) -> Self {
self.with_coerce_primitive(coerce_primitive)
}

/// Sets if the decoder should coerce primitive values (bool and number) into string
/// when the Schema's column is Utf8 or LargeUtf8.
pub fn with_coerce_primitive(self, coerce_primitive: bool) -> Self {
Expand Down
16 changes: 0 additions & 16 deletions arrow-ord/src/partition.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ use arrow_buffer::BooleanBuffer;
use arrow_schema::ArrowError;

use crate::cmp::distinct;
use crate::sort::SortColumn;

/// A computed set of partitions, see [`partition`]
#[derive(Debug, Clone)]
Expand Down Expand Up @@ -160,21 +159,6 @@ fn find_boundaries(v: &dyn Array) -> Result<BooleanBuffer, ArrowError> {
Ok(distinct(&v1, &v2)?.values().clone())
}

/// Use [`partition`] instead. Given a list of already sorted columns, find
/// partition ranges that would partition lexicographically equal values across
/// columns.
///
/// The returned vec would be of size k where k is cardinality of the sorted values; Consecutive
/// values will be connected: (a, b) and (b, c), where start = 0 and end = n for the first and last
/// range.
#[deprecated(since = "46.0.0", note = "Use partition")]
pub fn lexicographical_partition_ranges(
columns: &[SortColumn],
) -> Result<impl Iterator<Item = Range<usize>> + '_, ArrowError> {
let cols: Vec<_> = columns.iter().map(|x| x.values.clone()).collect();
Ok(partition(&cols)?.ranges().into_iter())
}

#[cfg(test)]
mod tests {
use std::sync::Arc;
Expand Down
6 changes: 0 additions & 6 deletions object_store/src/memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -468,12 +468,6 @@ impl InMemory {
Self { storage }
}

/// Creates a clone of the store
#[deprecated(since = "44.0.0", note = "Use fork() instead")]
pub async fn clone(&self) -> Self {
self.fork()
}

async fn entry(&self, location: &Path) -> Result<Entry> {
let storage = self.storage.read();
let value = storage
Expand Down
Loading

0 comments on commit 77ae7f2

Please sign in to comment.