feat: add ResourceExhausted to retryable error for Write API unary calls (#612)

* feat: add ResourceExhausted to retryable error for Write API unary calls
docs: add multiplexing documentation

PiperOrigin-RevId: 545839491

Source-Link: googleapis/googleapis@2b006af

Source-Link: googleapis/googleapis-gen@0d52d38
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMGQ1MmQzODViZDRlNzhjN2IyYzgzNzU1MDEzZmUxMDNlODA0YzM4NCJ9

* 🦉 Updates from OwlBot post-processor

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

---------

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
gcf-owl-bot[bot] authored Jul 6, 2023
1 parent c9384d1 commit aebe9d1
Showing 7 changed files with 68 additions and 35 deletions.
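
The diff below adds core_exceptions.ResourceExhausted to the default retry predicate of the unary Write API methods (get_write_stream, finalize_write_stream, batch_commit_write_streams, and flush_rows), so those calls now retry on ResourceExhausted without any caller changes. As a minimal sketch of what the updated policy looks like when spelled out explicitly (the resource name below is a placeholder, not from this commit):

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries
    from google.cloud import bigquery_storage_v1

    client = bigquery_storage_v1.BigQueryWriteClient()

    # Mirrors the new default: retry DeadlineExceeded, ResourceExhausted,
    # and ServiceUnavailable with exponential backoff, up to 600 seconds.
    custom_retry = retries.Retry(
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.ResourceExhausted,
            core_exceptions.ServiceUnavailable,
        ),
        deadline=600.0,
    )

    stream = client.get_write_stream(
        name="projects/my-project/datasets/my_dataset/tables/my_table/streams/_default",
        retry=custom_retry,
    )

Passing no retry argument now gives equivalent behavior by default; the sketch is only useful if a caller wants to tune the backoff further.
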
@@ -437,10 +437,11 @@ def request_generator():
requests (AsyncIterator[`google.cloud.bigquery_storage_v1.types.AppendRowsRequest`]):
The request object AsyncIterator. Request message for ``AppendRows``.
Due to the nature of AppendRows being a bidirectional
streaming RPC, certain parts of the AppendRowsRequest
need only be specified for the first request sent each
time the gRPC network connection is opened/reopened.
Because AppendRows is a bidirectional streaming RPC,
certain parts of the AppendRowsRequest need only be
specified for the first request before switching table
destinations. You can also switch table destinations
within the same connection for the default stream.
The size of a single AppendRowsRequest must be less than
10 MB in size. Requests larger than this return an
@@ -575,6 +576,7 @@ async def sample_get_write_stream():
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
@@ -685,6 +687,7 @@ async def sample_finalize_write_stream():
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
@@ -801,6 +804,7 @@ async def sample_batch_commit_write_streams():
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
@@ -919,6 +923,7 @@ async def sample_flush_rows():
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
@@ -672,10 +672,11 @@ def request_generator():
requests (Iterator[google.cloud.bigquery_storage_v1.types.AppendRowsRequest]):
The request object iterator. Request message for ``AppendRows``.
Due to the nature of AppendRows being a bidirectional
streaming RPC, certain parts of the AppendRowsRequest
need only be specified for the first request sent each
time the gRPC network connection is opened/reopened.
Because AppendRows is a bidirectional streaming RPC,
certain parts of the AppendRowsRequest need only be
specified for the first request before switching table
destinations. You can also switch table destinations
within the same connection for the default stream.
The size of a single AppendRowsRequest must be less than
10 MB in size. Requests larger than this return an
@@ -166,6 +166,7 @@ def _prep_wrapped_messages(self, client_info):
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
@@ -181,6 +182,7 @@ def _prep_wrapped_messages(self, client_info):
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
@@ -196,6 +198,7 @@ def _prep_wrapped_messages(self, client_info):
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
@@ -211,6 +214,7 @@ def _prep_wrapped_messages(self, client_info):
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
60 changes: 42 additions & 18 deletions google/cloud/bigquery_storage_v1/types/storage.py
@@ -358,10 +358,11 @@ class CreateWriteStreamRequest(proto.Message):
class AppendRowsRequest(proto.Message):
r"""Request message for ``AppendRows``.
Due to the nature of AppendRows being a bidirectional streaming RPC,
certain parts of the AppendRowsRequest need only be specified for
the first request sent each time the gRPC network connection is
opened/reopened.
Because AppendRows is a bidirectional streaming RPC, certain parts
of the AppendRowsRequest need only be specified for the first
request before switching table destinations. You can also switch
table destinations within the same connection for the default
stream.
The size of a single AppendRowsRequest must be less than 10 MB in
size. Requests larger than this return an error, typically
@@ -372,11 +373,14 @@
Attributes:
write_stream (str):
Required. The write_stream identifies the target of the
append operation, and only needs to be specified as part of
the first request on the gRPC connection. If provided for
subsequent requests, it must match the value of the first
request.
Required. The write_stream identifies the append operation.
It must be provided in the following scenarios:
- In the first request to an AppendRows connection.
- In all subsequent requests to an AppendRows connection,
if you use the same connection to write to multiple
tables or change the input schema for default streams.
For explicitly created write streams, the format is:
@@ -385,6 +389,23 @@
For the special default stream, the format is:
- ``projects/{project}/datasets/{dataset}/tables/{table}/streams/_default``.
An example of a possible sequence of requests with
write_stream fields within a single connection:
- r1: {write_stream: stream_name_1}
- r2: {write_stream: /*omit*/}
- r3: {write_stream: /*omit*/}
- r4: {write_stream: stream_name_2}
- r5: {write_stream: stream_name_2}
The destination changed in request_4, so the write_stream
field must be populated in all subsequent requests in this
stream.
offset (google.protobuf.wrappers_pb2.Int64Value):
If present, the write is only performed if the next append
offset is same as the provided value. If not present, the
@@ -420,10 +441,10 @@
"""

class MissingValueInterpretation(proto.Enum):
r"""An enum to indicate how to interpret missing values. Missing
values are fields present in user schema but missing in rows. A
missing value can represent a NULL or a column default value
defined in BigQuery table schema.
r"""An enum to indicate how to interpret missing values of fields
that are present in user schema but missing in rows. A missing
value can represent a NULL or a column default value defined in
BigQuery table schema.
Values:
MISSING_VALUE_INTERPRETATION_UNSPECIFIED (0):
@@ -446,11 +467,14 @@ class ProtoData(proto.Message):
Attributes:
writer_schema (google.cloud.bigquery_storage_v1.types.ProtoSchema):
Proto schema used to serialize the data.
This value only needs to be provided as part of
the first request on a gRPC network connection,
and will be ignored for subsequent requests on
the connection.
The protocol buffer schema used to serialize the data.
Provide this value whenever:
- You send the first request of an RPC connection.
- You change the input schema.
- You specify a new destination table.
rows (google.cloud.bigquery_storage_v1.types.ProtoRows):
Serialized row data in protobuf message
format. Currently, the backend expects the
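
As a rough illustration of the first-request rules documented above (write_stream and writer_schema are sent on the first request, may be omitted while the destination and schema stay the same, and must be sent again when switching table destinations on the default stream), a minimal request-generator sketch follows. It is not part of this diff: row_pb2.Row is a hypothetical compiled protobuf message matching the table schema, and the project, dataset, and table names are placeholders.

    from google.cloud import bigquery_storage_v1
    from google.cloud.bigquery_storage_v1 import types
    from google.protobuf import descriptor_pb2

    import row_pb2  # hypothetical module generated from a .proto matching the table schema

    client = bigquery_storage_v1.BigQueryWriteClient()

    # Build the writer schema from the generated message descriptor.
    proto_descriptor = descriptor_pb2.DescriptorProto()
    row_pb2.Row.DESCRIPTOR.CopyToProto(proto_descriptor)
    schema = types.ProtoSchema(proto_descriptor=proto_descriptor)

    def requests():
        # r1: the first request on the connection carries write_stream and writer_schema.
        yield types.AppendRowsRequest(
            write_stream="projects/my-project/datasets/my_dataset/tables/table_1/streams/_default",
            proto_rows=types.AppendRowsRequest.ProtoData(
                writer_schema=schema,
                rows=types.ProtoRows(
                    serialized_rows=[row_pb2.Row(name="a").SerializeToString()]
                ),
            ),
        )
        # r2: same destination and schema, so both fields may be omitted.
        yield types.AppendRowsRequest(
            proto_rows=types.AppendRowsRequest.ProtoData(
                rows=types.ProtoRows(
                    serialized_rows=[row_pb2.Row(name="b").SerializeToString()]
                ),
            ),
        )
        # r3: switching to another table's default stream, so write_stream (and the
        # schema, if it changed) must be populated again from this point on.
        yield types.AppendRowsRequest(
            write_stream="projects/my-project/datasets/my_dataset/tables/table_2/streams/_default",
            proto_rows=types.AppendRowsRequest.ProtoData(
                writer_schema=schema,
                rows=types.ProtoRows(
                    serialized_rows=[row_pb2.Row(name="c").SerializeToString()]
                ),
            ),
        )

    for response in client.append_rows(requests()):
        print(response.append_result.offset)
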
13 changes: 6 additions & 7 deletions google/cloud/bigquery_storage_v1/types/stream.py
@@ -136,13 +136,12 @@ class ReadSession(proto.Message):
incomplete or stale.
estimated_total_physical_file_size (int):
Output only. A pre-projected estimate of the
total physical size (in bytes) of files this
session will scan when all streams are
completely consumed. This estimate does not
depend on the selected columns and can be based
on metadata from the table which might be
incomplete or stale. Only set for BigLake
tables.
total physical size of files (in bytes) that
this session will scan when all streams are
consumed. This estimate is independent of the
selected columns and can be based on incomplete
or stale metadata from the table. This field is
only set for BigLake tables.
estimated_row_count (int):
Output only. An estimate on the number of
rows present in this session's streams. This
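
For context on where the estimate fields described above surface, a minimal sketch (not part of this diff) follows; the resource names are placeholders, and estimated_total_physical_file_size is only populated for BigLake tables.

    from google.cloud import bigquery_storage_v1
    from google.cloud.bigquery_storage_v1 import types

    client = bigquery_storage_v1.BigQueryReadClient()

    session = client.create_read_session(
        parent="projects/my-project",
        read_session=types.ReadSession(
            table="projects/my-project/datasets/my_dataset/tables/my_biglake_table",
            data_format=types.DataFormat.AVRO,
        ),
        max_stream_count=1,
    )

    print(session.estimated_row_count)
    print(session.estimated_total_physical_file_size)  # bytes; 0 if not populated
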
@@ -8,7 +8,7 @@
],
"language": "PYTHON",
"name": "google-cloud-bigquery-storage",
"version": "2.21.0"
"version": "0.1.0"
},
"snippets": [
{
@@ -8,7 +8,7 @@
],
"language": "PYTHON",
"name": "google-cloud-bigquery-storage",
"version": "2.21.0"
"version": "0.1.0"
},
"snippets": [
{