Skip to content

Commit 23ceffd

Browse files
feat: Add BigQuery Metastore Partition Service API version v1alpha (#471)
* docs: A comment for message `StreamMetastorePartitionsRequest` is changed docs: A comment for message `StreamMetastorePartitionsResponse` is changed docs: A comment for field `location_uri` in message `.google.cloud.bigquery.storage.v1alpha.StorageDescriptor` is changed PiperOrigin-RevId: 670602530 Source-Link: googleapis/googleapis@9c6ceea Source-Link: googleapis/googleapis-gen@1ab1ed5 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMWFiMWVkNTljZTQzYmE5NmU3MWNiNTg5NzA0MzM5ZjNiOGM2ZTUyNCJ9 feat: add documentation for partition value limit BREAKING CHANGE: make the client library gRPC only PiperOrigin-RevId: 666551276 Source-Link: googleapis/googleapis@6f3c628 Source-Link: googleapis/googleapis-gen@9c59969 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOWM1OTk2OThmOTBhZTU2YTYxZTM4YTI2NmJkNzcwNTYxNGM1ZGZmNiJ9 feat: Support for a custom error message for BatchSizeTooLargeError PiperOrigin-RevId: 665560115 Source-Link: googleapis/googleapis@869c2e1 Source-Link: googleapis/googleapis-gen@cb7632b Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiY2I3NjMyYjU2MTlmZGU4ZTI0ZTQ3YTMwYThiNWE3MjBhYzk4ODZlMiJ9 feat: Add BigQuery Metastore Partition Service API version v1alpha PiperOrigin-RevId: 662212485 Source-Link: googleapis/googleapis@456a812 Source-Link: googleapis/googleapis-gen@2ec266e Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMmVjMjY2ZTZkYTAzMjA4YTc2YjBmZDYwMDFiYTdkZjkzZGFlNDRlNiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: Increase method timeout to 240s for BigQuery Metastore Partition Service API version v1alpha PiperOrigin-RevId: 676173688 Source-Link: googleapis/googleapis@02f1184 Source-Link: googleapis/googleapis-gen@93f225b Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTNmMjI1YjFlNWM4OTcxMmZhMTdkYzM5OGY5OTBiYjFjZDkyNzAyNSJ9 * 🦉 Updates from OwlBot post-processor See 
https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: export v1alpha api * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com> Co-authored-by: Alvaro Viebrantz <aviebrantz@google.com>
1 parent c60587d commit 23ceffd

18 files changed

+11524
-0
lines changed
Lines changed: 282 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,282 @@
1+
// Copyright 2024 Google LLC
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
syntax = "proto3";
16+
17+
package google.cloud.bigquery.storage.v1alpha;
18+
19+
import "google/api/annotations.proto";
20+
import "google/api/client.proto";
21+
import "google/api/field_behavior.proto";
22+
import "google/api/resource.proto";
23+
import "google/cloud/bigquery/storage/v1alpha/partition.proto";
24+
import "google/protobuf/empty.proto";
25+
import "google/protobuf/field_mask.proto";
26+
27+
option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1Alpha";
28+
option go_package = "cloud.google.com/go/bigquery/storage/apiv1alpha/storagepb;storagepb";
29+
option java_multiple_files = true;
30+
option java_outer_classname = "MetastorePartitionServiceProto";
31+
option java_package = "com.google.cloud.bigquery.storage.v1alpha";
32+
option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1alpha";
33+
option (google.api.resource_definition) = {
34+
type: "bigquery.googleapis.com/Table"
35+
pattern: "projects/{project}/datasets/{dataset}/tables/{table}"
36+
};
37+
38+
// BigQuery Metastore Partition Service API.
// This service is used for managing metastore partitions in BigQuery metastore.
// The service supports only batch operations for write.
service MetastorePartitionService {
  option (google.api.default_host) = "bigquerystorage.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/bigquery,"
      "https://www.googleapis.com/auth/cloud-platform";

  // Adds metastore partitions to a table.
  rpc BatchCreateMetastorePartitions(BatchCreateMetastorePartitionsRequest)
      returns (BatchCreateMetastorePartitionsResponse) {
    option (google.api.http) = {
      post: "/v1alpha/{parent=projects/*/locations/*/datasets/*/tables/*}/partitions:batchCreate"
      body: "*"
    };
  }

  // Deletes metastore partitions from a table.
  rpc BatchDeleteMetastorePartitions(BatchDeleteMetastorePartitionsRequest)
      returns (google.protobuf.Empty) {
    option (google.api.http) = {
      post: "/v1alpha/{parent=projects/*/locations/*/datasets/*/tables/*}/partitions:batchDelete"
      body: "*"
    };
  }

  // Updates metastore partitions in a table.
  rpc BatchUpdateMetastorePartitions(BatchUpdateMetastorePartitionsRequest)
      returns (BatchUpdateMetastorePartitionsResponse) {
    option (google.api.http) = {
      post: "/v1alpha/{parent=projects/*/locations/*/datasets/*/tables/*}/partitions:batchUpdate"
      body: "*"
    };
  }

  // Gets metastore partitions from a table.
  rpc ListMetastorePartitions(ListMetastorePartitionsRequest)
      returns (ListMetastorePartitionsResponse) {
    option (google.api.http) = {
      get: "/v1alpha/{parent=projects/*/locations/*/datasets/*/tables/*}/partitions:list"
    };
    option (google.api.method_signature) = "parent";
  }

  // This is a bi-di streaming rpc method that allows the client to send
  // a stream of partitions and commit all of them atomically at the end.
  // If the commit is successful, the server will return a
  // response and close the stream. If the commit fails (due to duplicate
  // partitions or other reason), the server will close the stream with an
  // error. This method is only available via the gRPC API (not REST); it
  // intentionally has no google.api.http binding.
  rpc StreamMetastorePartitions(stream StreamMetastorePartitionsRequest)
      returns (stream StreamMetastorePartitionsResponse) {}
}
92+
93+
// Request message for CreateMetastorePartition. The MetastorePartition is
// uniquely identified by values, which is an ordered list. Hence, there is no
// separate name or partition id field.
message CreateMetastorePartitionRequest {
  // Required. Reference to the table to which the metastore partition is to
  // be added, in the format of
  // projects/{project}/databases/{databases}/tables/{table}.
  // NOTE(review): this format differs from the
  // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}
  // format documented on the batch request messages and used by the HTTP
  // bindings above — confirm which format the server actually accepts.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
  ];

  // Required. The metastore partition to be added.
  MetastorePartition metastore_partition = 2
      [(google.api.field_behavior) = REQUIRED];
}
109+
110+
// Request message for BatchCreateMetastorePartitions.
message BatchCreateMetastorePartitionsRequest {
  // Required. Reference to the table to which the metastore partitions are to
  // be added, in the format of
  // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
  ];

  // Required. Requests to add metastore partitions to the table.
  repeated CreateMetastorePartitionRequest requests = 2
      [(google.api.field_behavior) = REQUIRED];

  // Optional. Mimics the ifNotExists flag in IMetaStoreClient
  // add_partitions(..). If the flag is set to false, the server will return
  // ALREADY_EXISTS if any partition already exists. If the flag is set to true,
  // the server will skip existing partitions and insert only the non-existing
  // partitions.
  bool skip_existing_partitions = 3 [(google.api.field_behavior) = OPTIONAL];
}
131+
132+
// Response message for BatchCreateMetastorePartitions.
message BatchCreateMetastorePartitionsResponse {
  // The list of metastore partitions that have been created.
  repeated MetastorePartition partitions = 1;
}
137+
138+
// Request message for BatchDeleteMetastorePartitions. The MetastorePartition is
// uniquely identified by values, which is an ordered list. Hence, there is no
// separate name or partition id field.
message BatchDeleteMetastorePartitionsRequest {
  // Required. Reference to the table to which these metastore partitions
  // belong, in the format of
  // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
  ];

  // Required. The list of metastore partitions (identified by its values) to be
  // deleted. A maximum of 100 partitions can be deleted in a batch.
  repeated MetastorePartitionValues partition_values = 2
      [(google.api.field_behavior) = REQUIRED];
}
155+
156+
// Request message for UpdateMetastorePartition.
message UpdateMetastorePartitionRequest {
  // Required. The metastore partition to be updated.
  MetastorePartition metastore_partition = 1
      [(google.api.field_behavior) = REQUIRED];

  // Optional. The list of fields to update. Paths are snake_case field names
  // of MetastorePartition; presumably an unset mask updates all fields —
  // TODO(review): confirm server behavior when the mask is omitted.
  google.protobuf.FieldMask update_mask = 2
      [(google.api.field_behavior) = OPTIONAL];
}
166+
167+
// Request message for BatchUpdateMetastorePartitions.
message BatchUpdateMetastorePartitionsRequest {
  // Required. Reference to the table to which these metastore partitions
  // belong, in the format of
  // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
  ];

  // Required. Requests to update metastore partitions in the table.
  repeated UpdateMetastorePartitionRequest requests = 2
      [(google.api.field_behavior) = REQUIRED];
}
181+
182+
// Response message for BatchUpdateMetastorePartitions.
message BatchUpdateMetastorePartitionsResponse {
  // The list of metastore partitions that have been updated.
  repeated MetastorePartition partitions = 1;
}
187+
188+
// Request message for ListMetastorePartitions.
message ListMetastorePartitionsRequest {
  // Required. Reference to the table to which these metastore partitions
  // belong, in the format of
  // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
  ];

  // Optional. SQL text filtering statement, similar to a WHERE clause in a
  // query. Only supports single-row expressions. Aggregate functions are not
  // supported.
  //
  // Examples: "int_field > 5"
  // "date_field = CAST('2014-9-27' as DATE)"
  // "nullable_field is not NULL"
  // "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
  // "numeric_field BETWEEN 1.0 AND 5.0"
  // Restricted to a maximum length of 1 MB.
  string filter = 2 [(google.api.field_behavior) = OPTIONAL];
}
210+
211+
// Response message for ListMetastorePartitions.
message ListMetastorePartitionsResponse {
  // The response depends on the number of metastore partitions to be returned;
  // it can be a list of partitions or a list of
  // [ReadStream](https://cloud.google.com/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1#readstream)
  // objects. For the second situation, the BigQuery [Read API
  // ReadRows](https://cloud.google.com/bigquery/docs/reference/storage#read_from_a_session_stream)
  // method must be used to stream the data and convert it into a list of
  // partitions. Exactly one of the two variants is set.
  oneof response {
    // The list of partitions.
    MetastorePartitionList partitions = 1;

    // The list of streams.
    StreamList streams = 2;
  }
}
228+
229+
// The top-level message sent by the client to the
// [Partitions.StreamMetastorePartitions][] method.
// Follows the default gRPC streaming maximum size of 4 MB.
message StreamMetastorePartitionsRequest {
  // Required. Reference to the table to which the partition is to be added,
  // in the format of
  // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
  ];

  // Optional. A list of metastore partitions to be added to the table.
  repeated MetastorePartition metastore_partitions = 2
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Mimics the ifNotExists flag in IMetaStoreClient
  // add_partitions(..). If the flag is set to false, the server will return
  // ALREADY_EXISTS on commit if any partition already exists. If the flag is
  // set to true:
  // 1) the server will skip existing partitions and
  // insert only the non-existing partitions as part of the commit.
  // 2) The client must set the `skip_existing_partitions` field to true for
  // all requests in the stream.
  bool skip_existing_partitions = 3 [(google.api.field_behavior) = OPTIONAL];
}
255+
256+
// This is the response message sent by the server
// to the client for the [Partitions.StreamMetastorePartitions][] method when
// the commit is successful. Server will close the stream after sending this
// message.
message StreamMetastorePartitionsResponse {
  // Field number 1 is skipped (counts start at 2), presumably because an
  // earlier field was removed. Reserve it so a future field cannot
  // accidentally reuse the number against historical writers.
  reserved 1;

  // Total count of partitions streamed by the client during the lifetime of the
  // stream. This is only set in the final response message before closing the
  // stream.
  int64 total_partitions_streamed_count = 2;

  // Total count of partitions inserted by the server during the lifetime of the
  // stream. This is only set in the final response message before closing the
  // stream.
  int64 total_partitions_inserted_count = 3;
}
271+
272+
// Structured custom error message for batch size too large error.
// The error can be attached as error details in the returned rpc Status for
// more structured error handling in the client.
message BatchSizeTooLargeError {
  // The maximum number of items that are supported in a single batch. This is
  // returned as a hint to the client to adjust the batch size.
  int64 max_batch_size = 1;

  // Optional. The error message that is returned to the client.
  string error_message = 2 [(google.api.field_behavior) = OPTIONAL];
}

0 commit comments

Comments
 (0)