Commit 48fc8a9
clean up
Matt711 committed Dec 18, 2024
1 parent 76ffc61 commit 48fc8a9
Showing 5 changed files with 27 additions and 19 deletions.
python/pylibcudf/pylibcudf/io/avro.pyi: 8 changes (6 additions & 2 deletions)
@@ -1,7 +1,9 @@
# Copyright (c) 2024, NVIDIA CORPORATION.
from pylibcudf.io.types import SourceInfo, TableWithMetadata

from rmm._cuda.stream import Stream

from pylibcudf.io.types import SourceInfo, TableWithMetadata

__all__ = ["AvroReaderOptions", "AvroReaderOptionsBuilder", "read_avro"]

class AvroReaderOptions:
@@ -14,4 +16,6 @@ class AvroReaderOptionsBuilder:
def num_rows(num_rows: int) -> AvroReaderOptionsBuilder: ...
def build(self) -> AvroReaderOptions: ...

def read_avro(options: AvroReaderOptions, stream: stream = None) -> TableWithMetadata: ...
def read_avro(
options: AvroReaderOptions, stream: Stream = None
) -> TableWithMetadata: ...
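
The change above types the `stream` argument of `read_avro` as `rmm._cuda.stream.Stream` instead of the lowercase `stream` typo. A minimal usage sketch (not part of the commit), assuming `AvroReaderOptions.builder(...)` follows pylibcudf's usual options-builder pattern and using a placeholder file path:

```python
# Hedged sketch: read an Avro file while passing an explicit CUDA stream.
import pylibcudf as plc
from pylibcudf.io.types import SourceInfo
from rmm._cuda.stream import Stream

# Assumption: AvroReaderOptions exposes a builder() that takes a SourceInfo.
options = plc.io.avro.AvroReaderOptions.builder(
    SourceInfo(["data.avro"])  # placeholder path
).build()

# stream is now annotated as Stream, matching the stub fixed above.
result = plc.io.avro.read_avro(options, stream=Stream())
```
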
python/pylibcudf/pylibcudf/io/csv.pyi: 9 changes (4 additions & 5 deletions)
@@ -1,9 +1,9 @@
# Copyright (c) 2024, NVIDIA CORPORATION.

from collections.abc import Mapping

from typing_extensions import Self

from rmm._cuda.stream import Stream

from pylibcudf.io.types import (
CompressionType,
QuoteStyle,
@@ -13,7 +13,6 @@ from pylibcudf.io.types import (
)
from pylibcudf.table import Table
from pylibcudf.types import DataType
from rmm._cuda.stream import Stream

class CsvReaderOptions:
def __init__(self): ...
@@ -58,9 +57,9 @@ class CsvReaderOptionsBuilder:

def read_csv(
options: CsvReaderOptions,
stream: stream = None,
stream: Stream = None,
) -> TableWithMetadata: ...
def write_csv(options: CsvWriterOptionsBuilder, stream: stream = None): ...
def write_csv(options: CsvWriterOptionsBuilder, stream: Stream = None): ...

class CsvWriterOptions:
def __init__(self): ...
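
For the CSV reader the same annotation fix applies. A hedged sketch of `read_csv` with an explicit stream, assuming `CsvReaderOptions.builder(...)` mirrors the other reader builders and using a placeholder path:

```python
# Hedged sketch: read a CSV file on an explicitly supplied stream.
import pylibcudf as plc
from pylibcudf.io.types import SourceInfo
from rmm._cuda.stream import Stream

# Assumption: CsvReaderOptions.builder(...) exists, like the other readers.
opts = plc.io.csv.CsvReaderOptions.builder(SourceInfo(["data.csv"])).build()

# stream keyword now typed as Stream per the corrected stub.
tbl_with_meta = plc.io.csv.read_csv(opts, stream=Stream())
```
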
python/pylibcudf/pylibcudf/io/json.pyi: 11 changes (5 additions & 6 deletions)
@@ -4,6 +4,8 @@ from typing import TypeAlias

from typing_extensions import Self

from rmm._cuda.stream import Stream

from pylibcudf.column import Column
from pylibcudf.io.types import (
CompressionType,
@@ -14,8 +16,6 @@ from pylibcudf.io.types import (
)
from pylibcudf.table import Table
from pylibcudf.types import DataType
from rmm._cuda.stream import Stream


ChildNameToTypeMap: TypeAlias = Mapping[str, ChildNameToTypeMap]

@@ -32,7 +32,7 @@ def read_json(
mixed_types_as_string: bool = False,
prune_columns: bool = False,
recovery_mode: JSONRecoveryMode = JSONRecoveryMode.FAIL,
stream: stream = None,
stream: Stream = None,
) -> TableWithMetadata: ...

class JsonWriterOptions:
@@ -49,8 +49,7 @@ class JsonWriterOptionsBuilder:
def lines(self, val: bool) -> Self: ...
def build(self) -> JsonWriterOptions: ...

def write_json(options: JsonWriterOptions, stream: stream = None) -> None: ...

def write_json(options: JsonWriterOptions, stream: Stream = None) -> None: ...
def chunked_read_json(
source_info: SourceInfo,
dtypes: list[NameAndType] | None = None,
@@ -60,5 +59,5 @@ def chunked_read_json(
prune_columns: bool = False,
recovery_mode: JSONRecoveryMode = JSONRecoveryMode.FAIL,
chunk_size: int = 100_000_000,
stream: stream = None,
stream: Stream = None,
) -> tuple[list[Column], list[str], ChildNameToTypeMap]: ...
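
The `chunked_read_json` signature is fully visible in the stub above, so a sketch can follow it directly; the file path is a placeholder and the stream is passed explicitly rather than relying on the `None` default:

```python
# Hedged sketch: chunked JSON read, following the stub's signature.
import pylibcudf as plc
from pylibcudf.io.types import SourceInfo
from rmm._cuda.stream import Stream

# Returns (list of Columns, list of column names, child name-to-type map)
# per the annotated return type above; chunk_size is left at its default.
columns, names, child_names = plc.io.json.chunked_read_json(
    SourceInfo(["big.jsonl"]),  # placeholder path
    stream=Stream(),
)
```
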
python/pylibcudf/pylibcudf/io/orc.pyi: 5 changes (3 additions & 2 deletions)
@@ -2,6 +2,8 @@

from typing import Any, Self

from rmm._cuda.stream import Stream

from pylibcudf.io.types import (
CompressionType,
SinkInfo,
@@ -12,7 +14,6 @@ from pylibcudf.io.types import (
)
from pylibcudf.table import Table
from pylibcudf.types import DataType
from rmm._cuda.stream import Stream

def read_orc(
source_info: SourceInfo,
@@ -24,7 +25,7 @@ def read_orc(
use_np_dtypes: bool = True,
timestamp_type: DataType | None = None,
decimal128_columns: list[str] | None = None,
stream: Strem = Stream(),
stream: Stream = Stream(),
) -> TableWithMetadata: ...

class OrcColumnStatistics:
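
`read_orc` takes the `SourceInfo` directly rather than an options object, and the corrected stub defaults its `stream` to an explicitly constructed `Stream()`. A hedged sketch with a placeholder path, passing the stream explicitly:

```python
# Hedged sketch: read an ORC file, supplying the stream keyword explicitly.
import pylibcudf as plc
from pylibcudf.io.types import SourceInfo
from rmm._cuda.stream import Stream

# Other read_orc parameters (columns, timestamp_type, ...) keep their defaults.
tbl_with_meta = plc.io.orc.read_orc(
    SourceInfo(["data.orc"]),  # placeholder path
    stream=Stream(),
)
```
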
python/pylibcudf/pylibcudf/io/parquet.pyi: 13 changes (9 additions & 4 deletions)
@@ -4,6 +4,8 @@ from collections.abc import Mapping

from typing_extensions import Self

from rmm._cuda.stream import Stream

from pylibcudf.expressions import Expression
from pylibcudf.io.types import (
CompressionType,
@@ -16,7 +18,6 @@ from pylibcudf.io.types import (
TableWithMetadata,
)
from pylibcudf.table import Table
from rmm._cuda.stream import Stream

class ParquetReaderOptions:
def __init__(self): ...
@@ -55,7 +56,7 @@ class ChunkedParquetReader:

def read_parquet(
options: ParquetReaderOptions,
stream: stream = None,
stream: Stream = None,
) -> TableWithMetadata: ...

class ParquetWriterOptions:
@@ -87,14 +88,18 @@ class ParquetWriterOptionsBuilder:
def write_arrow_schema(self, enabled: bool) -> Self: ...
def build(self) -> ParquetWriterOptions: ...

def write_parquet(options: ParquetWriterOptions, stream: stream = None) -> memoryview: ...
def write_parquet(
options: ParquetWriterOptions, stream: Stream = None
) -> memoryview: ...

class ParquetChunkedWriter:
def __init__(self): ...
def close(self, metadata_file_path: list) -> memoryview: ...
def write(self, table: Table) -> None: ...
@staticmethod
def from_options(options: ChunkedParquetWriterOptions, stream: stream = None) -> Self: ...
def from_options(
options: ChunkedParquetWriterOptions, stream: Stream = None
) -> Self: ...

class ChunkedParquetWriterOptions:
def __init__(self): ...
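
To tie the Parquet read and write paths together, a hedged round-trip sketch follows. It assumes `ParquetReaderOptions.builder(...)` and `ParquetWriterOptions.builder(...)` follow the builder pattern shown in the stubs and that `TableWithMetadata` exposes the underlying table as `.tbl`; both paths are placeholders:

```python
# Hedged sketch: read a Parquet file and write it back on one explicit stream.
import pylibcudf as plc
from pylibcudf.io.types import SinkInfo, SourceInfo
from rmm._cuda.stream import Stream

stream = Stream()

# Assumption: reader options use a builder taking a SourceInfo.
read_opts = plc.io.parquet.ParquetReaderOptions.builder(
    SourceInfo(["in.parquet"])  # placeholder path
).build()
result = plc.io.parquet.read_parquet(read_opts, stream=stream)

# Assumption: writer options use a builder taking a SinkInfo and a Table (.tbl).
write_opts = plc.io.parquet.ParquetWriterOptions.builder(
    SinkInfo(["out.parquet"]), result.tbl  # placeholder path
).build()
# Per the stub above, write_parquet returns a memoryview of file metadata.
metadata = plc.io.parquet.write_parquet(write_opts, stream=stream)
```
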
