Skip to content

Commit fbd9d2a

Browse files
chore(docs): improve docstrings in async classes (#964)
1 parent 71b357f commit fbd9d2a

File tree

11 files changed

+642
-285
lines changed

11 files changed

+642
-285
lines changed

packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py

Lines changed: 14 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,14 @@ class _MutateRowsOperationAsync:
5656
5757
Errors are exposed as a MutationsExceptionGroup, which contains a list of
5858
exceptions organized by the related failed mutation entries.
59+
60+
Args:
61+
gapic_client: the client to use for the mutate_rows call
62+
table: the table associated with the request
63+
mutation_entries: a list of RowMutationEntry objects to send to the server
64+
operation_timeout: the timeout to use for the entire operation, in seconds.
65+
attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds.
66+
If not specified, the request will run until operation_timeout is reached.
5967
"""
6068

6169
def __init__(
@@ -67,15 +75,6 @@ def __init__(
6775
attempt_timeout: float | None,
6876
retryable_exceptions: Sequence[type[Exception]] = (),
6977
):
70-
"""
71-
Args:
72-
- gapic_client: the client to use for the mutate_rows call
73-
- table: the table associated with the request
74-
- mutation_entries: a list of RowMutationEntry objects to send to the server
75-
- operation_timeout: the timeout to use for the entire operation, in seconds.
76-
- attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds.
77-
If not specified, the request will run until operation_timeout is reached.
78-
"""
7978
# check that mutations are within limits
8079
total_mutations = sum(len(entry.mutations) for entry in mutation_entries)
8180
if total_mutations > _MUTATE_ROWS_REQUEST_MUTATION_LIMIT:
@@ -121,7 +120,7 @@ async def start(self):
121120
Start the operation, and run until completion
122121
123122
Raises:
124-
- MutationsExceptionGroup: if any mutations failed
123+
MutationsExceptionGroup: if any mutations failed
125124
"""
126125
try:
127126
# trigger mutate_rows
@@ -157,9 +156,9 @@ async def _run_attempt(self):
157156
Run a single attempt of the mutate_rows rpc.
158157
159158
Raises:
160-
- _MutateRowsIncomplete: if there are failed mutations eligible for
161-
retry after the attempt is complete
162-
- GoogleAPICallError: if the gapic rpc fails
159+
_MutateRowsIncomplete: if there are failed mutations eligible for
160+
retry after the attempt is complete
161+
GoogleAPICallError: if the gapic rpc fails
163162
"""
164163
request_entries = [self.mutations[idx].proto for idx in self.remaining_indices]
165164
# track mutations in this request that have not been finalized yet
@@ -213,8 +212,8 @@ def _handle_entry_error(self, idx: int, exc: Exception):
213212
retryable.
214213
215214
Args:
216-
- idx: the index of the mutation that failed
217-
- exc: the exception to add to the list
215+
idx: the index of the mutation that failed
216+
exc: the exception to add to the list
218217
"""
219218
entry = self.mutations[idx].entry
220219
self.errors.setdefault(idx, []).append(exc)

packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py

Lines changed: 29 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,13 @@ class _ReadRowsOperationAsync:
5959
6060
ReadRowsOperation(request, client) handles row merging logic end-to-end, including
6161
performing retries on stream errors.
62+
63+
Args:
64+
query: The query to execute
65+
table: The table to send the request to
66+
operation_timeout: The total time to allow for the operation, in seconds
67+
attempt_timeout: The time to allow for each individual attempt, in seconds
68+
retryable_exceptions: A list of exceptions that should trigger a retry
6269
"""
6370

6471
__slots__ = (
@@ -104,6 +111,9 @@ def __init__(
104111
def start_operation(self) -> AsyncGenerator[Row, None]:
105112
"""
106113
Start the read_rows operation, retrying on retryable errors.
114+
115+
Yields:
116+
Row: The next row in the stream
107117
"""
108118
return retries.retry_target_stream_async(
109119
self._read_rows_attempt,
@@ -119,6 +129,9 @@ def _read_rows_attempt(self) -> AsyncGenerator[Row, None]:
119129
This function is intended to be wrapped by retry logic,
120130
which will call this function until it succeeds or
121131
a non-retryable error is raised.
132+
133+
Yields:
134+
Row: The next row in the stream
122135
"""
123136
# revise request keys and ranges between attempts
124137
if self._last_yielded_row_key is not None:
@@ -151,6 +164,11 @@ async def chunk_stream(
151164
) -> AsyncGenerator[ReadRowsResponsePB.CellChunk, None]:
152165
"""
153166
Process chunks out of the raw read_rows stream
167+
168+
Args:
169+
stream: the raw read_rows stream from the gapic client
170+
Yields:
171+
ReadRowsResponsePB.CellChunk: the next chunk in the stream
154172
"""
155173
async for resp in await stream:
156174
# extract proto from proto-plus wrapper
@@ -195,9 +213,14 @@ async def chunk_stream(
195213
@staticmethod
196214
async def merge_rows(
197215
chunks: AsyncGenerator[ReadRowsResponsePB.CellChunk, None] | None
198-
):
216+
) -> AsyncGenerator[Row, None]:
199217
"""
200218
Merge chunks into rows
219+
220+
Args:
221+
chunks: the chunk stream to merge
222+
Yields:
223+
Row: the next row in the stream
201224
"""
202225
if chunks is None:
203226
return
@@ -311,10 +334,12 @@ def _revise_request_rowset(
311334
Revise the rows in the request to avoid ones we've already processed.
312335
313336
Args:
314-
- row_set: the row set from the request
315-
- last_seen_row_key: the last row key encountered
337+
row_set: the row set from the request
338+
last_seen_row_key: the last row key encountered
339+
Returns:
340+
RowSetPB: the new rowset after adjusting for the last seen key
316341
Raises:
317-
- _RowSetComplete: if there are no rows left to process after the revision
342+
_RowSetComplete: if there are no rows left to process after the revision
318343
"""
319344
# if user is doing a whole table scan, start a new one with the last seen key
320345
if row_set is None or (not row_set.row_ranges and row_set.row_keys is not None):

0 commit comments

Comments
 (0)