diff --git a/generation_config.yaml b/generation_config.yaml index 4dd452c45d78..4099fd2ec797 100644 --- a/generation_config.yaml +++ b/generation_config.yaml @@ -349,6 +349,7 @@ libraries: - proto_path: google/cloud/bigquery/biglake/v1 - proto_path: google/cloud/biglake/v1 - proto_path: google/cloud/bigquery/biglake/v1alpha1 + - proto_path: google/cloud/biglake/hive/v1beta - api_shortname: analyticshub name_pretty: Analytics Hub product_documentation: https://cloud.google.com/analytics-hub diff --git a/java-biglake/.OwlBot-hermetic.yaml b/java-biglake/.OwlBot-hermetic.yaml index 4925807f8728..4ffead625ccc 100644 --- a/java-biglake/.OwlBot-hermetic.yaml +++ b/java-biglake/.OwlBot-hermetic.yaml @@ -39,6 +39,14 @@ deep-copy-regex: dest: "/owl-bot-staging/java-biglake/$1/google-cloud-biglake/src" - source: "/google/cloud/biglake/(v.*)/.*-java/samples/snippets/generated" dest: "/owl-bot-staging/java-biglake/$1/samples/snippets/generated" +- source: "/google/cloud/biglake/hive/(v.*)/.*-java/proto-google-.*/src" + dest: "/owl-bot-staging/java-biglake/$1/proto-google-cloud-biglake-$1/src" +- source: "/google/cloud/biglake/hive/(v.*)/.*-java/grpc-google-.*/src" + dest: "/owl-bot-staging/java-biglake/$1/grpc-google-cloud-biglake-$1/src" +- source: "/google/cloud/biglake/hive/(v.*)/.*-java/gapic-google-.*/src" + dest: "/owl-bot-staging/java-biglake/$1/google-cloud-biglake/src" +- source: "/google/cloud/biglake/hive/(v.*)/.*-java/samples/snippets/generated" + dest: "/owl-bot-staging/java-biglake/$1/samples/snippets/generated" api-name: biglake diff --git a/java-biglake/google-cloud-biglake-bom/pom.xml b/java-biglake/google-cloud-biglake-bom/pom.xml index 2da311db56df..ab9395191399 100644 --- a/java-biglake/google-cloud-biglake-bom/pom.xml +++ b/java-biglake/google-cloud-biglake-bom/pom.xml @@ -39,6 +39,11 @@ grpc-google-cloud-biglake-v1 0.76.0-SNAPSHOT + + com.google.api.grpc + grpc-google-cloud-biglake-v1beta + 0.76.0-SNAPSHOT + com.google.api.grpc 
proto-google-cloud-biglake-v1alpha1 @@ -49,6 +54,11 @@ proto-google-cloud-biglake-v1 0.76.0-SNAPSHOT + + com.google.api.grpc + proto-google-cloud-biglake-v1beta + 0.76.0-SNAPSHOT + diff --git a/java-biglake/google-cloud-biglake/pom.xml b/java-biglake/google-cloud-biglake/pom.xml index 20c881ab91bb..e2539b8f4074 100644 --- a/java-biglake/google-cloud-biglake/pom.xml +++ b/java-biglake/google-cloud-biglake/pom.xml @@ -41,6 +41,10 @@ proto-google-common-protos + + com.google.api.grpc + proto-google-cloud-biglake-v1beta + com.google.api.grpc proto-google-cloud-biglake-v1 @@ -70,6 +74,11 @@ grpc-google-common-protos test + + com.google.api.grpc + grpc-google-cloud-biglake-v1beta + test + com.google.api.grpc grpc-google-cloud-biglake-v1 diff --git a/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceClient.java b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceClient.java new file mode 100644 index 000000000000..6041667d56b2 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceClient.java @@ -0,0 +1,3152 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.paging.AbstractFixedSizeCollection; +import com.google.api.gax.paging.AbstractPage; +import com.google.api.gax.paging.AbstractPagedListResponse; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.biglake.hive.v1beta.stub.HiveMetastoreServiceStub; +import com.google.cloud.biglake.hive.v1beta.stub.HiveMetastoreServiceStubSettings; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: Hive Metastore Service is a biglake service that allows users to manage + * their external Hive catalogs. Full API compatibility with OSS Hive Metastore APIs is not + * supported. The methods match the Hive Metastore API spec mostly except for a few exceptions. + * These include listing resources with pattern, environment context which are combined in a single + * List API, return of ListResponse object instead of a list of resources, transactions, locks, etc. + * + *

The BigLake Hive Metastore API defines the following resources: + * + *

+ * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+ *     HiveMetastoreServiceClient.create()) {
+ *   ProjectName parent = ProjectName.of("[PROJECT]");
+ *   HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build();
+ *   String hiveCatalogId = "hiveCatalogId-575314556";
+ *   HiveCatalog response =
+ *       hiveMetastoreServiceClient.createHiveCatalog(parent, hiveCatalog, hiveCatalogId);
+ * }
+ * }
+ * + *

Note: close() needs to be called on the HiveMetastoreServiceClient object to clean up + * resources such as threads. In the example above, try-with-resources is used, which automatically + * calls close(). + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Methods
MethodDescriptionMethod Variants

CreateHiveCatalog

Creates a new hive catalog.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createHiveCatalog(CreateHiveCatalogRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • createHiveCatalog(ProjectName parent, HiveCatalog hiveCatalog, String hiveCatalogId) + *

  • createHiveCatalog(String parent, HiveCatalog hiveCatalog, String hiveCatalogId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createHiveCatalogCallable() + *

+ *

GetHiveCatalog

Gets the catalog specified by the resource name.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getHiveCatalog(GetHiveCatalogRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getHiveCatalog(CatalogName name) + *

  • getHiveCatalog(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getHiveCatalogCallable() + *

+ *

ListHiveCatalogs

List all catalogs in a specified project.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listHiveCatalogs(ListHiveCatalogsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listHiveCatalogs(ProjectName parent) + *

  • listHiveCatalogs(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listHiveCatalogsPagedCallable() + *

  • listHiveCatalogsCallable() + *

+ *

UpdateHiveCatalog

Updates an existing catalog.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateHiveCatalog(UpdateHiveCatalogRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • updateHiveCatalog(HiveCatalog hiveCatalog, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateHiveCatalogCallable() + *

+ *

DeleteHiveCatalog

Deletes an existing catalog specified by the catalog ID. Delete will fail if the catalog is not empty.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteHiveCatalog(DeleteHiveCatalogRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteHiveCatalog(CatalogName name) + *

  • deleteHiveCatalog(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteHiveCatalogCallable() + *

+ *

CreateHiveDatabase

Creates a new database.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createHiveDatabase(CreateHiveDatabaseRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • createHiveDatabase(CatalogName parent, HiveDatabase hiveDatabase, String hiveDatabaseId) + *

  • createHiveDatabase(String parent, HiveDatabase hiveDatabase, String hiveDatabaseId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createHiveDatabaseCallable() + *

+ *

GetHiveDatabase

Gets the database specified by the resource name.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getHiveDatabase(GetHiveDatabaseRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getHiveDatabase(NamespaceName name) + *

  • getHiveDatabase(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getHiveDatabaseCallable() + *

+ *

ListHiveDatabases

List all databases in a specified catalog.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listHiveDatabases(ListHiveDatabasesRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listHiveDatabases(CatalogName parent) + *

  • listHiveDatabases(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listHiveDatabasesPagedCallable() + *

  • listHiveDatabasesCallable() + *

+ *

UpdateHiveDatabase

Updates an existing database specified by the database name.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateHiveDatabase(UpdateHiveDatabaseRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • updateHiveDatabase(HiveDatabase hiveDatabase, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateHiveDatabaseCallable() + *

+ *

DeleteHiveDatabase

Deletes an existing database specified by the database name.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteHiveDatabase(DeleteHiveDatabaseRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteHiveDatabase(NamespaceName name) + *

  • deleteHiveDatabase(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteHiveDatabaseCallable() + *

+ *

CreateHiveTable

Creates a new hive table.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createHiveTable(CreateHiveTableRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • createHiveTable(NamespaceName parent, HiveTable hiveTable, String hiveTableId) + *

  • createHiveTable(String parent, HiveTable hiveTable, String hiveTableId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createHiveTableCallable() + *

+ *

GetHiveTable

Gets the table specified by the resource name.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getHiveTable(GetHiveTableRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getHiveTable(TableName name) + *

  • getHiveTable(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getHiveTableCallable() + *

+ *

ListHiveTables

List all hive tables in a specified project under the hive catalog and database.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listHiveTables(ListHiveTablesRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listHiveTables(NamespaceName parent) + *

  • listHiveTables(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listHiveTablesPagedCallable() + *

  • listHiveTablesCallable() + *

+ *

UpdateHiveTable

Updates an existing table specified by the table name.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateHiveTable(UpdateHiveTableRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • updateHiveTable(HiveTable hiveTable, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateHiveTableCallable() + *

+ *

DeleteHiveTable

Deletes an existing table specified by the table name.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteHiveTable(DeleteHiveTableRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteHiveTable(TableName name) + *

  • deleteHiveTable(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteHiveTableCallable() + *

+ *

BatchCreatePartitions

Adds partitions to a table.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • batchCreatePartitions(BatchCreatePartitionsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • batchCreatePartitions(TableName parent) + *

  • batchCreatePartitions(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • batchCreatePartitionsCallable() + *

+ *

BatchDeletePartitions

Deletes partitions from a table.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • batchDeletePartitions(BatchDeletePartitionsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • batchDeletePartitions(TableName parent) + *

  • batchDeletePartitions(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • batchDeletePartitionsCallable() + *

+ *

BatchUpdatePartitions

Updates partitions in a table.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • batchUpdatePartitions(BatchUpdatePartitionsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • batchUpdatePartitions(TableName parent) + *

  • batchUpdatePartitions(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • batchUpdatePartitionsCallable() + *

+ *

ListPartitions

Streams list of partitions from a table.

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listPartitionsCallable() + *

+ *
+ * + *

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of HiveMetastoreServiceSettings + * to create(). For example: + * + *

To customize credentials: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * HiveMetastoreServiceSettings hiveMetastoreServiceSettings =
+ *     HiveMetastoreServiceSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * HiveMetastoreServiceClient hiveMetastoreServiceClient =
+ *     HiveMetastoreServiceClient.create(hiveMetastoreServiceSettings);
+ * }
+ * + *

To customize the endpoint: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * HiveMetastoreServiceSettings hiveMetastoreServiceSettings =
+ *     HiveMetastoreServiceSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * HiveMetastoreServiceClient hiveMetastoreServiceClient =
+ *     HiveMetastoreServiceClient.create(hiveMetastoreServiceSettings);
+ * }
+ * + *

To use REST (HTTP1.1/JSON) transport (instead of gRPC) for sending and receiving requests over + * the wire: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * HiveMetastoreServiceSettings hiveMetastoreServiceSettings =
+ *     HiveMetastoreServiceSettings.newHttpJsonBuilder().build();
+ * HiveMetastoreServiceClient hiveMetastoreServiceClient =
+ *     HiveMetastoreServiceClient.create(hiveMetastoreServiceSettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class HiveMetastoreServiceClient implements BackgroundResource { + private final HiveMetastoreServiceSettings settings; + private final HiveMetastoreServiceStub stub; + + /** Constructs an instance of HiveMetastoreServiceClient with default settings. */ + public static final HiveMetastoreServiceClient create() throws IOException { + return create(HiveMetastoreServiceSettings.newBuilder().build()); + } + + /** + * Constructs an instance of HiveMetastoreServiceClient, using the given settings. The channels + * are created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final HiveMetastoreServiceClient create(HiveMetastoreServiceSettings settings) + throws IOException { + return new HiveMetastoreServiceClient(settings); + } + + /** + * Constructs an instance of HiveMetastoreServiceClient, using the given stub for making calls. + * This is for advanced usage - prefer using create(HiveMetastoreServiceSettings). + */ + public static final HiveMetastoreServiceClient create(HiveMetastoreServiceStub stub) { + return new HiveMetastoreServiceClient(stub); + } + + /** + * Constructs an instance of HiveMetastoreServiceClient, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. 
+ */ + protected HiveMetastoreServiceClient(HiveMetastoreServiceSettings settings) throws IOException { + this.settings = settings; + this.stub = ((HiveMetastoreServiceStubSettings) settings.getStubSettings()).createStub(); + } + + protected HiveMetastoreServiceClient(HiveMetastoreServiceStub stub) { + this.settings = null; + this.stub = stub; + } + + public final HiveMetastoreServiceSettings getSettings() { + return settings; + } + + public HiveMetastoreServiceStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new hive catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build();
+   *   String hiveCatalogId = "hiveCatalogId-575314556";
+   *   HiveCatalog response =
+   *       hiveMetastoreServiceClient.createHiveCatalog(parent, hiveCatalog, hiveCatalogId);
+   * }
+   * }
+ * + * @param parent Required. The parent resource where this catalog will be created. Format: + * projects/{project_id_or_number} + * @param hiveCatalog Required. The catalog to create. The `name` field does not need to be + * provided. Gets copied over from catalog_id. + * @param hiveCatalogId Required. The Hive Catalog ID to use for the catalog that will become the + * final component of the catalog's resource name. The maximum length is 256 characters. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveCatalog createHiveCatalog( + ProjectName parent, HiveCatalog hiveCatalog, String hiveCatalogId) { + CreateHiveCatalogRequest request = + CreateHiveCatalogRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setHiveCatalog(hiveCatalog) + .setHiveCatalogId(hiveCatalogId) + .build(); + return createHiveCatalog(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new hive catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build();
+   *   String hiveCatalogId = "hiveCatalogId-575314556";
+   *   HiveCatalog response =
+   *       hiveMetastoreServiceClient.createHiveCatalog(parent, hiveCatalog, hiveCatalogId);
+   * }
+   * }
+ * + * @param parent Required. The parent resource where this catalog will be created. Format: + * projects/{project_id_or_number} + * @param hiveCatalog Required. The catalog to create. The `name` field does not need to be + * provided. Gets copied over from catalog_id. + * @param hiveCatalogId Required. The Hive Catalog ID to use for the catalog that will become the + * final component of the catalog's resource name. The maximum length is 256 characters. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveCatalog createHiveCatalog( + String parent, HiveCatalog hiveCatalog, String hiveCatalogId) { + CreateHiveCatalogRequest request = + CreateHiveCatalogRequest.newBuilder() + .setParent(parent) + .setHiveCatalog(hiveCatalog) + .setHiveCatalogId(hiveCatalogId) + .build(); + return createHiveCatalog(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new hive catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   CreateHiveCatalogRequest request =
+   *       CreateHiveCatalogRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setHiveCatalog(HiveCatalog.newBuilder().build())
+   *           .setHiveCatalogId("hiveCatalogId-575314556")
+   *           .setPrimaryLocation("primaryLocation-1140723753")
+   *           .build();
+   *   HiveCatalog response = hiveMetastoreServiceClient.createHiveCatalog(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveCatalog createHiveCatalog(CreateHiveCatalogRequest request) { + return createHiveCatalogCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new hive catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   CreateHiveCatalogRequest request =
+   *       CreateHiveCatalogRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setHiveCatalog(HiveCatalog.newBuilder().build())
+   *           .setHiveCatalogId("hiveCatalogId-575314556")
+   *           .setPrimaryLocation("primaryLocation-1140723753")
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.createHiveCatalogCallable().futureCall(request);
+   *   // Do something.
+   *   HiveCatalog response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable createHiveCatalogCallable() { + return stub.createHiveCatalogCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the catalog specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]");
+   *   HiveCatalog response = hiveMetastoreServiceClient.getHiveCatalog(name);
+   * }
+   * }
+ * + * @param name Required. The name of the catalog to retrieve. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveCatalog getHiveCatalog(CatalogName name) { + GetHiveCatalogRequest request = + GetHiveCatalogRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getHiveCatalog(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the catalog specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String name = CatalogName.of("[PROJECT]", "[CATALOG]").toString();
+   *   HiveCatalog response = hiveMetastoreServiceClient.getHiveCatalog(name);
+   * }
+   * }
+ * + * @param name Required. The name of the catalog to retrieve. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveCatalog getHiveCatalog(String name) { + GetHiveCatalogRequest request = GetHiveCatalogRequest.newBuilder().setName(name).build(); + return getHiveCatalog(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the catalog specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   GetHiveCatalogRequest request =
+   *       GetHiveCatalogRequest.newBuilder()
+   *           .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString())
+   *           .build();
+   *   HiveCatalog response = hiveMetastoreServiceClient.getHiveCatalog(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveCatalog getHiveCatalog(GetHiveCatalogRequest request) { + return getHiveCatalogCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the catalog specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   GetHiveCatalogRequest request =
+   *       GetHiveCatalogRequest.newBuilder()
+   *           .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString())
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.getHiveCatalogCallable().futureCall(request);
+   *   // Do something.
+   *   HiveCatalog response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getHiveCatalogCallable() { + return stub.getHiveCatalogCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all catalogs in a specified project. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   for (HiveCatalog element : hiveMetastoreServiceClient.listHiveCatalogs(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The project to list catalogs from. Format: + * projects/{project_id_or_number} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListHiveCatalogsPagedResponse listHiveCatalogs(ProjectName parent) { + ListHiveCatalogsRequest request = + ListHiveCatalogsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listHiveCatalogs(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all catalogs in a specified project. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   for (HiveCatalog element : hiveMetastoreServiceClient.listHiveCatalogs(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The project to list catalogs from. Format: + * projects/{project_id_or_number} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListHiveCatalogsPagedResponse listHiveCatalogs(String parent) { + ListHiveCatalogsRequest request = + ListHiveCatalogsRequest.newBuilder().setParent(parent).build(); + return listHiveCatalogs(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all catalogs in a specified project. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ListHiveCatalogsRequest request =
+   *       ListHiveCatalogsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (HiveCatalog element :
+   *       hiveMetastoreServiceClient.listHiveCatalogs(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListHiveCatalogsPagedResponse listHiveCatalogs(ListHiveCatalogsRequest request) { + return listHiveCatalogsPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all catalogs in a specified project. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ListHiveCatalogsRequest request =
+   *       ListHiveCatalogsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.listHiveCatalogsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (HiveCatalog element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listHiveCatalogsPagedCallable() { + return stub.listHiveCatalogsPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all catalogs in a specified project. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ListHiveCatalogsRequest request =
+   *       ListHiveCatalogsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListHiveCatalogsResponse response =
+   *         hiveMetastoreServiceClient.listHiveCatalogsCallable().call(request);
+   *     for (HiveCatalog element : response.getCatalogsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listHiveCatalogsCallable() { + return stub.listHiveCatalogsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an existing catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   HiveCatalog response = hiveMetastoreServiceClient.updateHiveCatalog(hiveCatalog, updateMask);
+   * }
+   * }
+ * + * @param hiveCatalog Required. The hive catalog to update. The name under the catalog is used to + * identify the catalog. Format: projects/{project_id_or_number}/catalogs/{catalog_id} + * @param updateMask Optional. The list of fields to update. + *

For the `FieldMask` definition, see + * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask If + * not set, defaults to all of the fields that are allowed to update. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveCatalog updateHiveCatalog(HiveCatalog hiveCatalog, FieldMask updateMask) { + UpdateHiveCatalogRequest request = + UpdateHiveCatalogRequest.newBuilder() + .setHiveCatalog(hiveCatalog) + .setUpdateMask(updateMask) + .build(); + return updateHiveCatalog(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an existing catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   UpdateHiveCatalogRequest request =
+   *       UpdateHiveCatalogRequest.newBuilder()
+   *           .setHiveCatalog(HiveCatalog.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   HiveCatalog response = hiveMetastoreServiceClient.updateHiveCatalog(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveCatalog updateHiveCatalog(UpdateHiveCatalogRequest request) { + return updateHiveCatalogCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an existing catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   UpdateHiveCatalogRequest request =
+   *       UpdateHiveCatalogRequest.newBuilder()
+   *           .setHiveCatalog(HiveCatalog.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.updateHiveCatalogCallable().futureCall(request);
+   *   // Do something.
+   *   HiveCatalog response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable updateHiveCatalogCallable() { + return stub.updateHiveCatalogCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing catalog specified by the catalog ID. Delete will fail if the catalog is not + * empty. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]");
+   *   hiveMetastoreServiceClient.deleteHiveCatalog(name);
+   * }
+   * }
+ * + * @param name Required. The name of the catalog to delete. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteHiveCatalog(CatalogName name) { + DeleteHiveCatalogRequest request = + DeleteHiveCatalogRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + deleteHiveCatalog(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing catalog specified by the catalog ID. Delete will fail if the catalog is not + * empty. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String name = CatalogName.of("[PROJECT]", "[CATALOG]").toString();
+   *   hiveMetastoreServiceClient.deleteHiveCatalog(name);
+   * }
+   * }
+ * + * @param name Required. The name of the catalog to delete. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteHiveCatalog(String name) { + DeleteHiveCatalogRequest request = DeleteHiveCatalogRequest.newBuilder().setName(name).build(); + deleteHiveCatalog(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing catalog specified by the catalog ID. Delete will fail if the catalog is not + * empty. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   DeleteHiveCatalogRequest request =
+   *       DeleteHiveCatalogRequest.newBuilder()
+   *           .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString())
+   *           .build();
+   *   hiveMetastoreServiceClient.deleteHiveCatalog(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteHiveCatalog(DeleteHiveCatalogRequest request) { + deleteHiveCatalogCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing catalog specified by the catalog ID. Delete will fail if the catalog is not + * empty. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   DeleteHiveCatalogRequest request =
+   *       DeleteHiveCatalogRequest.newBuilder()
+   *           .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString())
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.deleteHiveCatalogCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteHiveCatalogCallable() { + return stub.deleteHiveCatalogCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]");
+   *   HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build();
+   *   String hiveDatabaseId = "hiveDatabaseId-1150232698";
+   *   HiveDatabase response =
+   *       hiveMetastoreServiceClient.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId);
+   * }
+   * }
+ * + * @param parent Required. The parent resource where this database will be created. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id} + * @param hiveDatabase Required. The database to create. The `name` field does not need to be + * provided. + * @param hiveDatabaseId Required. The ID to use for the Hive Database. The maximum length is 128 + * characters. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveDatabase createHiveDatabase( + CatalogName parent, HiveDatabase hiveDatabase, String hiveDatabaseId) { + CreateHiveDatabaseRequest request = + CreateHiveDatabaseRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setHiveDatabase(hiveDatabase) + .setHiveDatabaseId(hiveDatabaseId) + .build(); + return createHiveDatabase(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String parent = CatalogName.of("[PROJECT]", "[CATALOG]").toString();
+   *   HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build();
+   *   String hiveDatabaseId = "hiveDatabaseId-1150232698";
+   *   HiveDatabase response =
+   *       hiveMetastoreServiceClient.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId);
+   * }
+   * }
+ * + * @param parent Required. The parent resource where this database will be created. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id} + * @param hiveDatabase Required. The database to create. The `name` field does not need to be + * provided. + * @param hiveDatabaseId Required. The ID to use for the Hive Database. The maximum length is 128 + * characters. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveDatabase createHiveDatabase( + String parent, HiveDatabase hiveDatabase, String hiveDatabaseId) { + CreateHiveDatabaseRequest request = + CreateHiveDatabaseRequest.newBuilder() + .setParent(parent) + .setHiveDatabase(hiveDatabase) + .setHiveDatabaseId(hiveDatabaseId) + .build(); + return createHiveDatabase(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   CreateHiveDatabaseRequest request =
+   *       CreateHiveDatabaseRequest.newBuilder()
+   *           .setParent(CatalogName.of("[PROJECT]", "[CATALOG]").toString())
+   *           .setHiveDatabase(HiveDatabase.newBuilder().build())
+   *           .setHiveDatabaseId("hiveDatabaseId-1150232698")
+   *           .build();
+   *   HiveDatabase response = hiveMetastoreServiceClient.createHiveDatabase(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveDatabase createHiveDatabase(CreateHiveDatabaseRequest request) { + return createHiveDatabaseCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   CreateHiveDatabaseRequest request =
+   *       CreateHiveDatabaseRequest.newBuilder()
+   *           .setParent(CatalogName.of("[PROJECT]", "[CATALOG]").toString())
+   *           .setHiveDatabase(HiveDatabase.newBuilder().build())
+   *           .setHiveDatabaseId("hiveDatabaseId-1150232698")
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.createHiveDatabaseCallable().futureCall(request);
+   *   // Do something.
+   *   HiveDatabase response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable createHiveDatabaseCallable() { + return stub.createHiveDatabaseCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the database specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]");
+   *   HiveDatabase response = hiveMetastoreServiceClient.getHiveDatabase(name);
+   * }
+   * }
+ * + * @param name Required. The name of the database to retrieve. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveDatabase getHiveDatabase(NamespaceName name) { + GetHiveDatabaseRequest request = + GetHiveDatabaseRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getHiveDatabase(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the database specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString();
+   *   HiveDatabase response = hiveMetastoreServiceClient.getHiveDatabase(name);
+   * }
+   * }
+ * + * @param name Required. The name of the database to retrieve. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveDatabase getHiveDatabase(String name) { + GetHiveDatabaseRequest request = GetHiveDatabaseRequest.newBuilder().setName(name).build(); + return getHiveDatabase(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the database specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   GetHiveDatabaseRequest request =
+   *       GetHiveDatabaseRequest.newBuilder()
+   *           .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString())
+   *           .build();
+   *   HiveDatabase response = hiveMetastoreServiceClient.getHiveDatabase(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveDatabase getHiveDatabase(GetHiveDatabaseRequest request) { + return getHiveDatabaseCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the database specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   GetHiveDatabaseRequest request =
+   *       GetHiveDatabaseRequest.newBuilder()
+   *           .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString())
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.getHiveDatabaseCallable().futureCall(request);
+   *   // Do something.
+   *   HiveDatabase response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getHiveDatabaseCallable() { + return stub.getHiveDatabaseCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all databases in a specified catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]");
+   *   for (HiveDatabase element :
+   *       hiveMetastoreServiceClient.listHiveDatabases(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The hive catalog to list databases from. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListHiveDatabasesPagedResponse listHiveDatabases(CatalogName parent) { + ListHiveDatabasesRequest request = + ListHiveDatabasesRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listHiveDatabases(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all databases in a specified catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String parent = CatalogName.of("[PROJECT]", "[CATALOG]").toString();
+   *   for (HiveDatabase element :
+   *       hiveMetastoreServiceClient.listHiveDatabases(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The hive catalog to list databases from. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListHiveDatabasesPagedResponse listHiveDatabases(String parent) { + ListHiveDatabasesRequest request = + ListHiveDatabasesRequest.newBuilder().setParent(parent).build(); + return listHiveDatabases(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all databases in a specified catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ListHiveDatabasesRequest request =
+   *       ListHiveDatabasesRequest.newBuilder()
+   *           .setParent(CatalogName.of("[PROJECT]", "[CATALOG]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (HiveDatabase element :
+   *       hiveMetastoreServiceClient.listHiveDatabases(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListHiveDatabasesPagedResponse listHiveDatabases(ListHiveDatabasesRequest request) { + return listHiveDatabasesPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all databases in a specified catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ListHiveDatabasesRequest request =
+   *       ListHiveDatabasesRequest.newBuilder()
+   *           .setParent(CatalogName.of("[PROJECT]", "[CATALOG]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.listHiveDatabasesPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (HiveDatabase element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listHiveDatabasesPagedCallable() { + return stub.listHiveDatabasesPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all databases in a specified catalog. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ListHiveDatabasesRequest request =
+   *       ListHiveDatabasesRequest.newBuilder()
+   *           .setParent(CatalogName.of("[PROJECT]", "[CATALOG]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListHiveDatabasesResponse response =
+   *         hiveMetastoreServiceClient.listHiveDatabasesCallable().call(request);
+   *     for (HiveDatabase element : response.getDatabasesList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listHiveDatabasesCallable() { + return stub.listHiveDatabasesCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an existing database specified by the database name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   HiveDatabase response =
+   *       hiveMetastoreServiceClient.updateHiveDatabase(hiveDatabase, updateMask);
+   * }
+   * }
+ * + * @param hiveDatabase Required. The database to update. + *

The database's `name` field is used to identify the database to update. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + * @param updateMask Optional. The list of fields to update. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveDatabase updateHiveDatabase(HiveDatabase hiveDatabase, FieldMask updateMask) { + UpdateHiveDatabaseRequest request = + UpdateHiveDatabaseRequest.newBuilder() + .setHiveDatabase(hiveDatabase) + .setUpdateMask(updateMask) + .build(); + return updateHiveDatabase(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an existing database specified by the database name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   UpdateHiveDatabaseRequest request =
+   *       UpdateHiveDatabaseRequest.newBuilder()
+   *           .setHiveDatabase(HiveDatabase.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   HiveDatabase response = hiveMetastoreServiceClient.updateHiveDatabase(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveDatabase updateHiveDatabase(UpdateHiveDatabaseRequest request) { + return updateHiveDatabaseCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an existing database specified by the database name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   UpdateHiveDatabaseRequest request =
+   *       UpdateHiveDatabaseRequest.newBuilder()
+   *           .setHiveDatabase(HiveDatabase.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.updateHiveDatabaseCallable().futureCall(request);
+   *   // Do something.
+   *   HiveDatabase response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable updateHiveDatabaseCallable() { + return stub.updateHiveDatabaseCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing database specified by the database name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]");
+   *   hiveMetastoreServiceClient.deleteHiveDatabase(name);
+   * }
+   * }
+ * + * @param name Required. The name of the database to delete. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteHiveDatabase(NamespaceName name) { + DeleteHiveDatabaseRequest request = + DeleteHiveDatabaseRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + deleteHiveDatabase(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing database specified by the database name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString();
+   *   hiveMetastoreServiceClient.deleteHiveDatabase(name);
+   * }
+   * }
+ * + * @param name Required. The name of the database to delete. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteHiveDatabase(String name) { + DeleteHiveDatabaseRequest request = + DeleteHiveDatabaseRequest.newBuilder().setName(name).build(); + deleteHiveDatabase(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing database specified by the database name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   DeleteHiveDatabaseRequest request =
+   *       DeleteHiveDatabaseRequest.newBuilder()
+   *           .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString())
+   *           .build();
+   *   hiveMetastoreServiceClient.deleteHiveDatabase(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteHiveDatabase(DeleteHiveDatabaseRequest request) { + deleteHiveDatabaseCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing database specified by the database name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   DeleteHiveDatabaseRequest request =
+   *       DeleteHiveDatabaseRequest.newBuilder()
+   *           .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString())
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.deleteHiveDatabaseCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteHiveDatabaseCallable() { + return stub.deleteHiveDatabaseCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new hive table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]");
+   *   HiveTable hiveTable = HiveTable.newBuilder().build();
+   *   String hiveTableId = "hiveTableId152241145";
+   *   HiveTable response =
+   *       hiveMetastoreServiceClient.createHiveTable(parent, hiveTable, hiveTableId);
+   * }
+   * }
+ * + * @param parent Required. The parent resource for the table to be created. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + * @param hiveTable Required. The Hive Table to create. The `name` field does not need to be + * provided. + * @param hiveTableId Required. The Hive Table ID to use for the table that will become the final + * component of the table's resource name. The maximum length is 256 characters. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveTable createHiveTable( + NamespaceName parent, HiveTable hiveTable, String hiveTableId) { + CreateHiveTableRequest request = + CreateHiveTableRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setHiveTable(hiveTable) + .setHiveTableId(hiveTableId) + .build(); + return createHiveTable(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new hive table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString();
+   *   HiveTable hiveTable = HiveTable.newBuilder().build();
+   *   String hiveTableId = "hiveTableId152241145";
+   *   HiveTable response =
+   *       hiveMetastoreServiceClient.createHiveTable(parent, hiveTable, hiveTableId);
+   * }
+   * }
+ * + * @param parent Required. The parent resource for the table to be created. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + * @param hiveTable Required. The Hive Table to create. The `name` field does not need to be + * provided. + * @param hiveTableId Required. The Hive Table ID to use for the table that will become the final + * component of the table's resource name. The maximum length is 256 characters. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveTable createHiveTable(String parent, HiveTable hiveTable, String hiveTableId) { + CreateHiveTableRequest request = + CreateHiveTableRequest.newBuilder() + .setParent(parent) + .setHiveTable(hiveTable) + .setHiveTableId(hiveTableId) + .build(); + return createHiveTable(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new hive table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   CreateHiveTableRequest request =
+   *       CreateHiveTableRequest.newBuilder()
+   *           .setParent(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString())
+   *           .setHiveTable(HiveTable.newBuilder().build())
+   *           .setHiveTableId("hiveTableId152241145")
+   *           .build();
+   *   HiveTable response = hiveMetastoreServiceClient.createHiveTable(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveTable createHiveTable(CreateHiveTableRequest request) { + return createHiveTableCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new hive table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   CreateHiveTableRequest request =
+   *       CreateHiveTableRequest.newBuilder()
+   *           .setParent(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString())
+   *           .setHiveTable(HiveTable.newBuilder().build())
+   *           .setHiveTableId("hiveTableId152241145")
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.createHiveTableCallable().futureCall(request);
+   *   // Do something.
+   *   HiveTable response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable createHiveTableCallable() { + return stub.createHiveTableCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the table specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]");
+   *   HiveTable response = hiveMetastoreServiceClient.getHiveTable(name);
+   * }
+   * }
+ * + * @param name Required. The name of the table to retrieve. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveTable getHiveTable(TableName name) { + GetHiveTableRequest request = + GetHiveTableRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getHiveTable(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the table specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString();
+   *   HiveTable response = hiveMetastoreServiceClient.getHiveTable(name);
+   * }
+   * }
+ * + * @param name Required. The name of the table to retrieve. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveTable getHiveTable(String name) { + GetHiveTableRequest request = GetHiveTableRequest.newBuilder().setName(name).build(); + return getHiveTable(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the table specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   GetHiveTableRequest request =
+   *       GetHiveTableRequest.newBuilder()
+   *           .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+   *           .build();
+   *   HiveTable response = hiveMetastoreServiceClient.getHiveTable(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveTable getHiveTable(GetHiveTableRequest request) { + return getHiveTableCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the table specified by the resource name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   GetHiveTableRequest request =
+   *       GetHiveTableRequest.newBuilder()
+   *           .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+   *           .build();
+   *   ApiFuture future =
+   *       hiveMetastoreServiceClient.getHiveTableCallable().futureCall(request);
+   *   // Do something.
+   *   HiveTable response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getHiveTableCallable() { + return stub.getHiveTableCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all hive tables in a specified project under the hive catalog and database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]");
+   *   for (HiveTable element : hiveMetastoreServiceClient.listHiveTables(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The database to list tables from. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListHiveTablesPagedResponse listHiveTables(NamespaceName parent) { + ListHiveTablesRequest request = + ListHiveTablesRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listHiveTables(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all hive tables in a specified project under the hive catalog and database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString();
+   *   for (HiveTable element : hiveMetastoreServiceClient.listHiveTables(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The database to list tables from. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListHiveTablesPagedResponse listHiveTables(String parent) { + ListHiveTablesRequest request = ListHiveTablesRequest.newBuilder().setParent(parent).build(); + return listHiveTables(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all hive tables in a specified project under the hive catalog and database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ListHiveTablesRequest request =
+   *       ListHiveTablesRequest.newBuilder()
+   *           .setParent(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (HiveTable element : hiveMetastoreServiceClient.listHiveTables(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListHiveTablesPagedResponse listHiveTables(ListHiveTablesRequest request) { + return listHiveTablesPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all hive tables in a specified project under the hive catalog and database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ListHiveTablesRequest request =
+   *       ListHiveTablesRequest.newBuilder()
+   *           .setParent(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture<ListHiveTablesPagedResponse> future =
+   *       hiveMetastoreServiceClient.listHiveTablesPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (HiveTable element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listHiveTablesPagedCallable() { + return stub.listHiveTablesPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * List all hive tables in a specified project under the hive catalog and database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ListHiveTablesRequest request =
+   *       ListHiveTablesRequest.newBuilder()
+   *           .setParent(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListHiveTablesResponse response =
+   *         hiveMetastoreServiceClient.listHiveTablesCallable().call(request);
+   *     for (HiveTable element : response.getTablesList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listHiveTablesCallable() { + return stub.listHiveTablesCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an existing table specified by the table name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   HiveTable hiveTable = HiveTable.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   HiveTable response = hiveMetastoreServiceClient.updateHiveTable(hiveTable, updateMask);
+   * }
+   * }
+ * + * @param hiveTable Required. The table to update. + *

The table's `name` field is used to identify the table to update. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + * @param updateMask Optional. The list of fields to update. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveTable updateHiveTable(HiveTable hiveTable, FieldMask updateMask) { + UpdateHiveTableRequest request = + UpdateHiveTableRequest.newBuilder() + .setHiveTable(hiveTable) + .setUpdateMask(updateMask) + .build(); + return updateHiveTable(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an existing table specified by the table name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   UpdateHiveTableRequest request =
+   *       UpdateHiveTableRequest.newBuilder()
+   *           .setHiveTable(HiveTable.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   HiveTable response = hiveMetastoreServiceClient.updateHiveTable(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final HiveTable updateHiveTable(UpdateHiveTableRequest request) { + return updateHiveTableCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an existing table specified by the table name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   UpdateHiveTableRequest request =
+   *       UpdateHiveTableRequest.newBuilder()
+   *           .setHiveTable(HiveTable.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture<HiveTable> future =
+   *       hiveMetastoreServiceClient.updateHiveTableCallable().futureCall(request);
+   *   // Do something.
+   *   HiveTable response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable updateHiveTableCallable() { + return stub.updateHiveTableCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing table specified by the table name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]");
+   *   hiveMetastoreServiceClient.deleteHiveTable(name);
+   * }
+   * }
+ * + * @param name Required. The name of the database to delete. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteHiveTable(TableName name) { + DeleteHiveTableRequest request = + DeleteHiveTableRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + deleteHiveTable(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing table specified by the table name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString();
+   *   hiveMetastoreServiceClient.deleteHiveTable(name);
+   * }
+   * }
+ * + * @param name Required. The name of the database to delete. Format: + * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteHiveTable(String name) { + DeleteHiveTableRequest request = DeleteHiveTableRequest.newBuilder().setName(name).build(); + deleteHiveTable(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing table specified by the table name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   DeleteHiveTableRequest request =
+   *       DeleteHiveTableRequest.newBuilder()
+   *           .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+   *           .build();
+   *   hiveMetastoreServiceClient.deleteHiveTable(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteHiveTable(DeleteHiveTableRequest request) { + deleteHiveTableCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing table specified by the table name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   DeleteHiveTableRequest request =
+   *       DeleteHiveTableRequest.newBuilder()
+   *           .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+   *           .build();
+   *   ApiFuture<Empty> future =
+   *       hiveMetastoreServiceClient.deleteHiveTableCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteHiveTableCallable() { + return stub.deleteHiveTableCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds partitions to a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]");
+   *   BatchCreatePartitionsResponse response =
+   *       hiveMetastoreServiceClient.batchCreatePartitions(parent);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to where the partitions to be added, in the + * format of projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCreatePartitionsResponse batchCreatePartitions(TableName parent) { + BatchCreatePartitionsRequest request = + BatchCreatePartitionsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return batchCreatePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds partitions to a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString();
+   *   BatchCreatePartitionsResponse response =
+   *       hiveMetastoreServiceClient.batchCreatePartitions(parent);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to where the partitions to be added, in the + * format of projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCreatePartitionsResponse batchCreatePartitions(String parent) { + BatchCreatePartitionsRequest request = + BatchCreatePartitionsRequest.newBuilder().setParent(parent).build(); + return batchCreatePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds partitions to a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   BatchCreatePartitionsRequest request =
+   *       BatchCreatePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList<>())
+   *           .setSkipExistingPartitions(true)
+   *           .build();
+   *   BatchCreatePartitionsResponse response =
+   *       hiveMetastoreServiceClient.batchCreatePartitions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCreatePartitionsResponse batchCreatePartitions( + BatchCreatePartitionsRequest request) { + return batchCreatePartitionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds partitions to a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   BatchCreatePartitionsRequest request =
+   *       BatchCreatePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList<>())
+   *           .setSkipExistingPartitions(true)
+   *           .build();
+   *   ApiFuture<BatchCreatePartitionsResponse> future =
+   *       hiveMetastoreServiceClient.batchCreatePartitionsCallable().futureCall(request);
+   *   // Do something.
+   *   BatchCreatePartitionsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + batchCreatePartitionsCallable() { + return stub.batchCreatePartitionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes partitions from a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]");
+   *   hiveMetastoreServiceClient.batchDeletePartitions(parent);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to which these partitions belong, in the format + * of projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void batchDeletePartitions(TableName parent) { + BatchDeletePartitionsRequest request = + BatchDeletePartitionsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + batchDeletePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes partitions from a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString();
+   *   hiveMetastoreServiceClient.batchDeletePartitions(parent);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to which these partitions belong, in the format + * of projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void batchDeletePartitions(String parent) { + BatchDeletePartitionsRequest request = + BatchDeletePartitionsRequest.newBuilder().setParent(parent).build(); + batchDeletePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes partitions from a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   BatchDeletePartitionsRequest request =
+   *       BatchDeletePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+   *           .addAllPartitionValues(new ArrayList<>())
+   *           .build();
+   *   hiveMetastoreServiceClient.batchDeletePartitions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void batchDeletePartitions(BatchDeletePartitionsRequest request) { + batchDeletePartitionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes partitions from a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   BatchDeletePartitionsRequest request =
+   *       BatchDeletePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+   *           .addAllPartitionValues(new ArrayList<>())
+   *           .build();
+   *   ApiFuture<Empty> future =
+   *       hiveMetastoreServiceClient.batchDeletePartitionsCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable batchDeletePartitionsCallable() { + return stub.batchDeletePartitionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates partitions in a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]");
+   *   BatchUpdatePartitionsResponse response =
+   *       hiveMetastoreServiceClient.batchUpdatePartitions(parent);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to which these partitions belong, in the format + * of projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchUpdatePartitionsResponse batchUpdatePartitions(TableName parent) { + BatchUpdatePartitionsRequest request = + BatchUpdatePartitionsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return batchUpdatePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates partitions in a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   String parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString();
+   *   BatchUpdatePartitionsResponse response =
+   *       hiveMetastoreServiceClient.batchUpdatePartitions(parent);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to which these partitions belong, in the format + * of projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchUpdatePartitionsResponse batchUpdatePartitions(String parent) { + BatchUpdatePartitionsRequest request = + BatchUpdatePartitionsRequest.newBuilder().setParent(parent).build(); + return batchUpdatePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates partitions in a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   BatchUpdatePartitionsRequest request =
+   *       BatchUpdatePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList<>())
+   *           .build();
+   *   BatchUpdatePartitionsResponse response =
+   *       hiveMetastoreServiceClient.batchUpdatePartitions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchUpdatePartitionsResponse batchUpdatePartitions( + BatchUpdatePartitionsRequest request) { + return batchUpdatePartitionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates partitions in a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   BatchUpdatePartitionsRequest request =
+   *       BatchUpdatePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList<>())
+   *           .build();
+   *   ApiFuture<BatchUpdatePartitionsResponse> future =
+   *       hiveMetastoreServiceClient.batchUpdatePartitionsCallable().futureCall(request);
+   *   // Do something.
+   *   BatchUpdatePartitionsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + batchUpdatePartitionsCallable() { + return stub.batchUpdatePartitionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Streams list of partitions from a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+   *     HiveMetastoreServiceClient.create()) {
+   *   ListPartitionsRequest request =
+   *       ListPartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .build();
+   *   ServerStream<ListPartitionsResponse> stream =
+   *       hiveMetastoreServiceClient.listPartitionsCallable().call(request);
+   *   for (ListPartitionsResponse response : stream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }
+   */
+  public final ServerStreamingCallable<ListPartitionsRequest, ListPartitionsResponse>
+      listPartitionsCallable() {
+    return stub.listPartitionsCallable();
+  }
+
+  @Override
+  public final void close() {
+    stub.close();
+  }
+
+  @Override
+  public void shutdown() {
+    stub.shutdown();
+  }
+
+  @Override
+  public boolean isShutdown() {
+    return stub.isShutdown();
+  }
+
+  @Override
+  public boolean isTerminated() {
+    return stub.isTerminated();
+  }
+
+  @Override
+  public void shutdownNow() {
+    stub.shutdownNow();
+  }
+
+  @Override
+  public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException {
+    return stub.awaitTermination(duration, unit);
+  }
+
+  public static class ListHiveCatalogsPagedResponse
+      extends AbstractPagedListResponse<
+          ListHiveCatalogsRequest,
+          ListHiveCatalogsResponse,
+          HiveCatalog,
+          ListHiveCatalogsPage,
+          ListHiveCatalogsFixedSizeCollection> {
+
+    public static ApiFuture<ListHiveCatalogsPagedResponse> createAsync(
+        PageContext<ListHiveCatalogsRequest, ListHiveCatalogsResponse, HiveCatalog> context,
+        ApiFuture<ListHiveCatalogsResponse> futureResponse) {
+      ApiFuture<ListHiveCatalogsPage> futurePage =
+          ListHiveCatalogsPage.createEmptyPage().createPageAsync(context, futureResponse);
+      return ApiFutures.transform(
+          futurePage,
+          input -> new ListHiveCatalogsPagedResponse(input),
+          MoreExecutors.directExecutor());
+    }
+
+    private ListHiveCatalogsPagedResponse(ListHiveCatalogsPage page) {
+      super(page, ListHiveCatalogsFixedSizeCollection.createEmptyCollection());
+    }
+  }
+
+  public static class ListHiveCatalogsPage
+      extends AbstractPage<
+          ListHiveCatalogsRequest, ListHiveCatalogsResponse, HiveCatalog, ListHiveCatalogsPage> {
+
+    private ListHiveCatalogsPage(
+        PageContext<ListHiveCatalogsRequest, ListHiveCatalogsResponse, HiveCatalog> context,
+        ListHiveCatalogsResponse response) {
+      super(context, response);
+    }
+
+    private static ListHiveCatalogsPage createEmptyPage() {
+      return new ListHiveCatalogsPage(null, null);
+    }
+
+    @Override
+    protected ListHiveCatalogsPage createPage(
+        PageContext<ListHiveCatalogsRequest, ListHiveCatalogsResponse, HiveCatalog> context,
+        ListHiveCatalogsResponse response) {
+      return new ListHiveCatalogsPage(context, response);
+    }
+
+    @Override
+    public ApiFuture<ListHiveCatalogsPage> createPageAsync(
+        PageContext<ListHiveCatalogsRequest, ListHiveCatalogsResponse, HiveCatalog> context,
+        ApiFuture<ListHiveCatalogsResponse> futureResponse) {
+      return super.createPageAsync(context, futureResponse);
+    }
+  }
+
+  public static class ListHiveCatalogsFixedSizeCollection
+      extends AbstractFixedSizeCollection<
+          ListHiveCatalogsRequest,
+          ListHiveCatalogsResponse,
+          HiveCatalog,
+          ListHiveCatalogsPage,
+          ListHiveCatalogsFixedSizeCollection> {
+
+    private ListHiveCatalogsFixedSizeCollection(
+        List<ListHiveCatalogsPage> pages, int collectionSize) {
+      super(pages, collectionSize);
+    }
+
+    private static ListHiveCatalogsFixedSizeCollection createEmptyCollection() {
+      return new ListHiveCatalogsFixedSizeCollection(null, 0);
+    }
+
+    @Override
+    protected ListHiveCatalogsFixedSizeCollection createCollection(
+        List<ListHiveCatalogsPage> pages, int collectionSize) {
+      return new ListHiveCatalogsFixedSizeCollection(pages, collectionSize);
+    }
+  }
+
+  public static class ListHiveDatabasesPagedResponse
+      extends AbstractPagedListResponse<
+          ListHiveDatabasesRequest,
+          ListHiveDatabasesResponse,
+          HiveDatabase,
+          ListHiveDatabasesPage,
+          ListHiveDatabasesFixedSizeCollection> {
+
+    public static ApiFuture<ListHiveDatabasesPagedResponse> createAsync(
+        PageContext<ListHiveDatabasesRequest, ListHiveDatabasesResponse, HiveDatabase> context,
+        ApiFuture<ListHiveDatabasesResponse> futureResponse) {
+      ApiFuture<ListHiveDatabasesPage> futurePage =
+          ListHiveDatabasesPage.createEmptyPage().createPageAsync(context, futureResponse);
+      return ApiFutures.transform(
+          futurePage,
+          input -> new ListHiveDatabasesPagedResponse(input),
+          MoreExecutors.directExecutor());
+    }
+
+    private ListHiveDatabasesPagedResponse(ListHiveDatabasesPage page) {
+      super(page, ListHiveDatabasesFixedSizeCollection.createEmptyCollection());
+    }
+  }
+
+  public static class ListHiveDatabasesPage
+      extends AbstractPage<
+          ListHiveDatabasesRequest,
+          ListHiveDatabasesResponse,
+          HiveDatabase,
+          ListHiveDatabasesPage> {
+
+    private ListHiveDatabasesPage(
+        PageContext<ListHiveDatabasesRequest, ListHiveDatabasesResponse, HiveDatabase> context,
+        ListHiveDatabasesResponse response) {
+      super(context, response);
+    }
+
+    private static ListHiveDatabasesPage createEmptyPage() {
+      return new ListHiveDatabasesPage(null, null);
+    }
+
+    @Override
+    protected ListHiveDatabasesPage createPage(
+        PageContext<ListHiveDatabasesRequest, ListHiveDatabasesResponse, HiveDatabase> context,
+        ListHiveDatabasesResponse response) {
+      return new ListHiveDatabasesPage(context, response);
+    }
+
+    @Override
+    public ApiFuture<ListHiveDatabasesPage> createPageAsync(
+        PageContext<ListHiveDatabasesRequest, ListHiveDatabasesResponse, HiveDatabase> context,
+        ApiFuture<ListHiveDatabasesResponse> futureResponse) {
+      return super.createPageAsync(context, futureResponse);
+    }
+  }
+
+  public static class ListHiveDatabasesFixedSizeCollection
+      extends AbstractFixedSizeCollection<
+          ListHiveDatabasesRequest,
+          ListHiveDatabasesResponse,
+          HiveDatabase,
+          ListHiveDatabasesPage,
+          ListHiveDatabasesFixedSizeCollection> {
+
+    private ListHiveDatabasesFixedSizeCollection(
+        List<ListHiveDatabasesPage> pages, int collectionSize) {
+      super(pages, collectionSize);
+    }
+
+    private static ListHiveDatabasesFixedSizeCollection createEmptyCollection() {
+      return new ListHiveDatabasesFixedSizeCollection(null, 0);
+    }
+
+    @Override
+    protected ListHiveDatabasesFixedSizeCollection createCollection(
+        List<ListHiveDatabasesPage> pages, int collectionSize) {
+      return new ListHiveDatabasesFixedSizeCollection(pages, collectionSize);
+    }
+  }
+
+  public static class ListHiveTablesPagedResponse
+      extends AbstractPagedListResponse<
+          ListHiveTablesRequest,
+          ListHiveTablesResponse,
+          HiveTable,
+          ListHiveTablesPage,
+          ListHiveTablesFixedSizeCollection> {
+
+    public static ApiFuture<ListHiveTablesPagedResponse> createAsync(
+        PageContext<ListHiveTablesRequest, ListHiveTablesResponse, HiveTable> context,
+        ApiFuture<ListHiveTablesResponse> futureResponse) {
+      ApiFuture<ListHiveTablesPage> futurePage =
+          ListHiveTablesPage.createEmptyPage().createPageAsync(context, futureResponse);
+      return ApiFutures.transform(
+          futurePage,
+          input -> new ListHiveTablesPagedResponse(input),
+          MoreExecutors.directExecutor());
+    }
+
+    private ListHiveTablesPagedResponse(ListHiveTablesPage page) {
+      super(page, ListHiveTablesFixedSizeCollection.createEmptyCollection());
+    }
+  }
+
+  public static class ListHiveTablesPage
+      extends AbstractPage<
+          ListHiveTablesRequest, ListHiveTablesResponse, HiveTable, ListHiveTablesPage> {
+
+    private ListHiveTablesPage(
+        PageContext<ListHiveTablesRequest, ListHiveTablesResponse, HiveTable> context,
+        ListHiveTablesResponse response) {
+      super(context, response);
+    }
+
+    private static ListHiveTablesPage createEmptyPage() {
+      return new ListHiveTablesPage(null, null);
+    }
+
+    @Override
+    protected ListHiveTablesPage createPage(
+        PageContext<ListHiveTablesRequest, ListHiveTablesResponse, HiveTable> context,
+        ListHiveTablesResponse response) {
+      return new ListHiveTablesPage(context, response);
+    }
+
+    @Override
+    public ApiFuture<ListHiveTablesPage> createPageAsync(
+        PageContext<ListHiveTablesRequest, ListHiveTablesResponse, HiveTable> context,
+        ApiFuture<ListHiveTablesResponse> futureResponse) {
+      return super.createPageAsync(context, futureResponse);
+    }
+  }
+
+  public static class ListHiveTablesFixedSizeCollection
+      extends AbstractFixedSizeCollection<
+          ListHiveTablesRequest,
+          ListHiveTablesResponse,
+          HiveTable,
+          ListHiveTablesPage,
+          ListHiveTablesFixedSizeCollection> {
+
+    private ListHiveTablesFixedSizeCollection(List<ListHiveTablesPage> pages, int collectionSize) {
+      super(pages, collectionSize);
+    }
+
+    private static ListHiveTablesFixedSizeCollection createEmptyCollection() {
+      return new ListHiveTablesFixedSizeCollection(null, 0);
+    }
+
+    @Override
+    protected ListHiveTablesFixedSizeCollection createCollection(
+        List<ListHiveTablesPage> pages, int collectionSize) {
+      return new ListHiveTablesFixedSizeCollection(pages, collectionSize);
+    }
+  }
+}
diff --git a/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceSettings.java b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceSettings.java
new file mode 100644
index 000000000000..dd1239e06dc1
--- /dev/null
+++ b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceSettings.java
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.biglake.hive.v1beta;
+
+import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveCatalogsPagedResponse;
+import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveDatabasesPagedResponse;
+import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveTablesPagedResponse;
+
+import com.google.api.core.ApiFunction;
+import com.google.api.core.BetaApi;
+import com.google.api.gax.core.GoogleCredentialsProvider;
+import com.google.api.gax.core.InstantiatingExecutorProvider;
+import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider;
+import com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider;
+import com.google.api.gax.rpc.ApiClientHeaderProvider;
+import com.google.api.gax.rpc.ClientContext;
+import com.google.api.gax.rpc.ClientSettings;
+import com.google.api.gax.rpc.PagedCallSettings;
+import com.google.api.gax.rpc.ServerStreamingCallSettings;
+import com.google.api.gax.rpc.TransportChannelProvider;
+import com.google.api.gax.rpc.UnaryCallSettings;
+import com.google.cloud.biglake.hive.v1beta.stub.HiveMetastoreServiceStubSettings;
+import com.google.protobuf.Empty;
+import java.io.IOException;
+import java.util.List;
+import javax.annotation.Generated;
+
+// AUTO-GENERATED DOCUMENTATION AND CLASS.
+/**
+ * Settings class to configure an instance of {@link HiveMetastoreServiceClient}.
+ *
+ * <p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (biglake.googleapis.com) and default port (443) are used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the
+ * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings)
+ * of createHiveCatalog:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * HiveMetastoreServiceSettings.Builder hiveMetastoreServiceSettingsBuilder =
+ *     HiveMetastoreServiceSettings.newBuilder();
+ * hiveMetastoreServiceSettingsBuilder
+ *     .createHiveCatalogSettings()
+ *     .setRetrySettings(
+ *         hiveMetastoreServiceSettingsBuilder
+ *             .createHiveCatalogSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * HiveMetastoreServiceSettings hiveMetastoreServiceSettings =
+ *     hiveMetastoreServiceSettingsBuilder.build();
+ * }</pre>
+ *
+ * <p>Please refer to the [Client Side Retry
+ * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting
+ * retries.
+ */
+@BetaApi
+@Generated("by gapic-generator-java")
+public class HiveMetastoreServiceSettings extends ClientSettings<HiveMetastoreServiceSettings> {
+
+  /** Returns the object with the settings used for calls to createHiveCatalog. */
+  public UnaryCallSettings<CreateHiveCatalogRequest, HiveCatalog> createHiveCatalogSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).createHiveCatalogSettings();
+  }
+
+  /** Returns the object with the settings used for calls to getHiveCatalog. */
+  public UnaryCallSettings<GetHiveCatalogRequest, HiveCatalog> getHiveCatalogSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).getHiveCatalogSettings();
+  }
+
+  /** Returns the object with the settings used for calls to listHiveCatalogs. */
+  public PagedCallSettings<
+          ListHiveCatalogsRequest, ListHiveCatalogsResponse, ListHiveCatalogsPagedResponse>
+      listHiveCatalogsSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).listHiveCatalogsSettings();
+  }
+
+  /** Returns the object with the settings used for calls to updateHiveCatalog. */
+  public UnaryCallSettings<UpdateHiveCatalogRequest, HiveCatalog> updateHiveCatalogSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).updateHiveCatalogSettings();
+  }
+
+  /** Returns the object with the settings used for calls to deleteHiveCatalog. */
+  public UnaryCallSettings<DeleteHiveCatalogRequest, Empty> deleteHiveCatalogSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).deleteHiveCatalogSettings();
+  }
+
+  /** Returns the object with the settings used for calls to createHiveDatabase. */
+  public UnaryCallSettings<CreateHiveDatabaseRequest, HiveDatabase> createHiveDatabaseSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).createHiveDatabaseSettings();
+  }
+
+  /** Returns the object with the settings used for calls to getHiveDatabase. */
+  public UnaryCallSettings<GetHiveDatabaseRequest, HiveDatabase> getHiveDatabaseSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).getHiveDatabaseSettings();
+  }
+
+  /** Returns the object with the settings used for calls to listHiveDatabases. */
+  public PagedCallSettings<
+          ListHiveDatabasesRequest, ListHiveDatabasesResponse, ListHiveDatabasesPagedResponse>
+      listHiveDatabasesSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).listHiveDatabasesSettings();
+  }
+
+  /** Returns the object with the settings used for calls to updateHiveDatabase. */
+  public UnaryCallSettings<UpdateHiveDatabaseRequest, HiveDatabase> updateHiveDatabaseSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).updateHiveDatabaseSettings();
+  }
+
+  /** Returns the object with the settings used for calls to deleteHiveDatabase. */
+  public UnaryCallSettings<DeleteHiveDatabaseRequest, Empty> deleteHiveDatabaseSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).deleteHiveDatabaseSettings();
+  }
+
+  /** Returns the object with the settings used for calls to createHiveTable. */
+  public UnaryCallSettings<CreateHiveTableRequest, HiveTable> createHiveTableSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).createHiveTableSettings();
+  }
+
+  /** Returns the object with the settings used for calls to getHiveTable. */
+  public UnaryCallSettings<GetHiveTableRequest, HiveTable> getHiveTableSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).getHiveTableSettings();
+  }
+
+  /** Returns the object with the settings used for calls to listHiveTables. */
+  public PagedCallSettings<
+          ListHiveTablesRequest, ListHiveTablesResponse, ListHiveTablesPagedResponse>
+      listHiveTablesSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).listHiveTablesSettings();
+  }
+
+  /** Returns the object with the settings used for calls to updateHiveTable. */
+  public UnaryCallSettings<UpdateHiveTableRequest, HiveTable> updateHiveTableSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).updateHiveTableSettings();
+  }
+
+  /** Returns the object with the settings used for calls to deleteHiveTable. */
+  public UnaryCallSettings<DeleteHiveTableRequest, Empty> deleteHiveTableSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).deleteHiveTableSettings();
+  }
+
+  /** Returns the object with the settings used for calls to batchCreatePartitions. */
+  public UnaryCallSettings<BatchCreatePartitionsRequest, BatchCreatePartitionsResponse>
+      batchCreatePartitionsSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).batchCreatePartitionsSettings();
+  }
+
+  /** Returns the object with the settings used for calls to batchDeletePartitions. */
+  public UnaryCallSettings<BatchDeletePartitionsRequest, Empty> batchDeletePartitionsSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).batchDeletePartitionsSettings();
+  }
+
+  /** Returns the object with the settings used for calls to batchUpdatePartitions. */
+  public UnaryCallSettings<BatchUpdatePartitionsRequest, BatchUpdatePartitionsResponse>
+      batchUpdatePartitionsSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).batchUpdatePartitionsSettings();
+  }
+
+  /** Returns the object with the settings used for calls to listPartitions. */
+  public ServerStreamingCallSettings<ListPartitionsRequest, ListPartitionsResponse>
+      listPartitionsSettings() {
+    return ((HiveMetastoreServiceStubSettings) getStubSettings()).listPartitionsSettings();
+  }
+
+  public static final HiveMetastoreServiceSettings create(HiveMetastoreServiceStubSettings stub)
+      throws IOException {
+    return new HiveMetastoreServiceSettings.Builder(stub.toBuilder()).build();
+  }
+
+  /** Returns a builder for the default ExecutorProvider for this service. */
+  public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() {
+    return HiveMetastoreServiceStubSettings.defaultExecutorProviderBuilder();
+  }
+
+  /** Returns the default service endpoint. */
+  public static String getDefaultEndpoint() {
+    return HiveMetastoreServiceStubSettings.getDefaultEndpoint();
+  }
+
+  /** Returns the default service scopes. */
+  public static List<String> getDefaultServiceScopes() {
+    return HiveMetastoreServiceStubSettings.getDefaultServiceScopes();
+  }
+
+  /** Returns a builder for the default credentials for this service. */
+  public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() {
+    return HiveMetastoreServiceStubSettings.defaultCredentialsProviderBuilder();
+  }
+
+  /** Returns a builder for the default gRPC ChannelProvider for this service. */
+  public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() {
+    return HiveMetastoreServiceStubSettings.defaultGrpcTransportProviderBuilder();
+  }
+
+  /** Returns a builder for the default REST ChannelProvider for this service. */
+  @BetaApi
+  public static InstantiatingHttpJsonChannelProvider.Builder
+      defaultHttpJsonTransportProviderBuilder() {
+    return HiveMetastoreServiceStubSettings.defaultHttpJsonTransportProviderBuilder();
+  }
+
+  public static TransportChannelProvider defaultTransportChannelProvider() {
+    return HiveMetastoreServiceStubSettings.defaultTransportChannelProvider();
+  }
+
+  public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() {
+    return HiveMetastoreServiceStubSettings.defaultApiClientHeaderProviderBuilder();
+  }
+
+  /** Returns a new gRPC builder for this class. */
+  public static Builder newBuilder() {
+    return Builder.createDefault();
+  }
+
+  /** Returns a new REST builder for this class. */
+  public static Builder newHttpJsonBuilder() {
+    return Builder.createHttpJsonDefault();
+  }
+
+  /** Returns a new builder for this class. */
+  public static Builder newBuilder(ClientContext clientContext) {
+    return new Builder(clientContext);
+  }
+
+  /** Returns a builder containing all the values of this settings class. */
+  public Builder toBuilder() {
+    return new Builder(this);
+  }
+
+  protected HiveMetastoreServiceSettings(Builder settingsBuilder) throws IOException {
+    super(settingsBuilder);
+  }
+
+  /** Builder for HiveMetastoreServiceSettings. */
+  public static class Builder
+      extends ClientSettings.Builder<HiveMetastoreServiceSettings, Builder> {
+
+    protected Builder() throws IOException {
+      this(((ClientContext) null));
+    }
+
+    protected Builder(ClientContext clientContext) {
+      super(HiveMetastoreServiceStubSettings.newBuilder(clientContext));
+    }
+
+    protected Builder(HiveMetastoreServiceSettings settings) {
+      super(settings.getStubSettings().toBuilder());
+    }
+
+    protected Builder(HiveMetastoreServiceStubSettings.Builder stubSettings) {
+      super(stubSettings);
+    }
+
+    private static Builder createDefault() {
+      return new Builder(HiveMetastoreServiceStubSettings.newBuilder());
+    }
+
+    private static Builder createHttpJsonDefault() {
+      return new Builder(HiveMetastoreServiceStubSettings.newHttpJsonBuilder());
+    }
+
+    public HiveMetastoreServiceStubSettings.Builder getStubSettingsBuilder() {
+      return ((HiveMetastoreServiceStubSettings.Builder) getStubSettings());
+    }
+
+    /**
+     * Applies the given settings updater function to all of the unary API methods in this service.
+     *
+     * <p>Note: This method does not support applying settings to streaming methods.
+     */
+    public Builder applyToAllUnaryMethods(
+        ApiFunction<UnaryCallSettings.Builder<?, ?>, Void> settingsUpdater) {
+      super.applyToAllUnaryMethods(
+          getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater);
+      return this;
+    }
+
+    /** Returns the builder for the settings used for calls to createHiveCatalog. */
+    public UnaryCallSettings.Builder<CreateHiveCatalogRequest, HiveCatalog>
+        createHiveCatalogSettings() {
+      return getStubSettingsBuilder().createHiveCatalogSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to getHiveCatalog. */
+    public UnaryCallSettings.Builder<GetHiveCatalogRequest, HiveCatalog> getHiveCatalogSettings() {
+      return getStubSettingsBuilder().getHiveCatalogSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to listHiveCatalogs. */
+    public PagedCallSettings.Builder<
+            ListHiveCatalogsRequest, ListHiveCatalogsResponse, ListHiveCatalogsPagedResponse>
+        listHiveCatalogsSettings() {
+      return getStubSettingsBuilder().listHiveCatalogsSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to updateHiveCatalog. */
+    public UnaryCallSettings.Builder<UpdateHiveCatalogRequest, HiveCatalog>
+        updateHiveCatalogSettings() {
+      return getStubSettingsBuilder().updateHiveCatalogSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to deleteHiveCatalog. */
+    public UnaryCallSettings.Builder<DeleteHiveCatalogRequest, Empty> deleteHiveCatalogSettings() {
+      return getStubSettingsBuilder().deleteHiveCatalogSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to createHiveDatabase. */
+    public UnaryCallSettings.Builder<CreateHiveDatabaseRequest, HiveDatabase>
+        createHiveDatabaseSettings() {
+      return getStubSettingsBuilder().createHiveDatabaseSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to getHiveDatabase. */
+    public UnaryCallSettings.Builder<GetHiveDatabaseRequest, HiveDatabase>
+        getHiveDatabaseSettings() {
+      return getStubSettingsBuilder().getHiveDatabaseSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to listHiveDatabases. */
+    public PagedCallSettings.Builder<
+            ListHiveDatabasesRequest, ListHiveDatabasesResponse, ListHiveDatabasesPagedResponse>
+        listHiveDatabasesSettings() {
+      return getStubSettingsBuilder().listHiveDatabasesSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to updateHiveDatabase. */
+    public UnaryCallSettings.Builder<UpdateHiveDatabaseRequest, HiveDatabase>
+        updateHiveDatabaseSettings() {
+      return getStubSettingsBuilder().updateHiveDatabaseSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to deleteHiveDatabase. */
+    public UnaryCallSettings.Builder<DeleteHiveDatabaseRequest, Empty>
+        deleteHiveDatabaseSettings() {
+      return getStubSettingsBuilder().deleteHiveDatabaseSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to createHiveTable. */
+    public UnaryCallSettings.Builder<CreateHiveTableRequest, HiveTable> createHiveTableSettings() {
+      return getStubSettingsBuilder().createHiveTableSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to getHiveTable. */
+    public UnaryCallSettings.Builder<GetHiveTableRequest, HiveTable> getHiveTableSettings() {
+      return getStubSettingsBuilder().getHiveTableSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to listHiveTables. */
+    public PagedCallSettings.Builder<
+            ListHiveTablesRequest, ListHiveTablesResponse, ListHiveTablesPagedResponse>
+        listHiveTablesSettings() {
+      return getStubSettingsBuilder().listHiveTablesSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to updateHiveTable. */
+    public UnaryCallSettings.Builder<UpdateHiveTableRequest, HiveTable> updateHiveTableSettings() {
+      return getStubSettingsBuilder().updateHiveTableSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to deleteHiveTable. */
+    public UnaryCallSettings.Builder<DeleteHiveTableRequest, Empty> deleteHiveTableSettings() {
+      return getStubSettingsBuilder().deleteHiveTableSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to batchCreatePartitions. */
+    public UnaryCallSettings.Builder<BatchCreatePartitionsRequest, BatchCreatePartitionsResponse>
+        batchCreatePartitionsSettings() {
+      return getStubSettingsBuilder().batchCreatePartitionsSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to batchDeletePartitions. */
+    public UnaryCallSettings.Builder<BatchDeletePartitionsRequest, Empty>
+        batchDeletePartitionsSettings() {
+      return getStubSettingsBuilder().batchDeletePartitionsSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to batchUpdatePartitions. */
+    public UnaryCallSettings.Builder<BatchUpdatePartitionsRequest, BatchUpdatePartitionsResponse>
+        batchUpdatePartitionsSettings() {
+      return getStubSettingsBuilder().batchUpdatePartitionsSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to listPartitions. */
+    public ServerStreamingCallSettings.Builder<ListPartitionsRequest, ListPartitionsResponse>
+        listPartitionsSettings() {
+      return getStubSettingsBuilder().listPartitionsSettings();
+    }
+
+    @Override
+    public HiveMetastoreServiceSettings build() throws IOException {
+      return new HiveMetastoreServiceSettings(this);
+    }
+  }
+}
diff --git a/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/gapic_metadata.json b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/gapic_metadata.json
new file mode 100644
index 000000000000..b559df359195
--- /dev/null
+++ b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/gapic_metadata.json
@@ -0,0 +1,75 @@
+{
+  "schema": "1.0",
+  "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
+  "language": "java",
+  "protoPackage": "google.cloud.biglake.hive.v1beta",
+  "libraryPackage": "com.google.cloud.biglake.hive.v1beta",
+  "services": {
+    "HiveMetastoreService": {
+      "clients": {
+        "grpc": {
+          "libraryClient": "HiveMetastoreServiceClient",
+          "rpcs": {
+            "BatchCreatePartitions": {
+              "methods": ["batchCreatePartitions", "batchCreatePartitions", "batchCreatePartitions", "batchCreatePartitionsCallable"]
+            },
+            "BatchDeletePartitions": {
+              "methods": ["batchDeletePartitions",
"batchDeletePartitions", "batchDeletePartitions", "batchDeletePartitionsCallable"] + }, + "BatchUpdatePartitions": { + "methods": ["batchUpdatePartitions", "batchUpdatePartitions", "batchUpdatePartitions", "batchUpdatePartitionsCallable"] + }, + "CreateHiveCatalog": { + "methods": ["createHiveCatalog", "createHiveCatalog", "createHiveCatalog", "createHiveCatalogCallable"] + }, + "CreateHiveDatabase": { + "methods": ["createHiveDatabase", "createHiveDatabase", "createHiveDatabase", "createHiveDatabaseCallable"] + }, + "CreateHiveTable": { + "methods": ["createHiveTable", "createHiveTable", "createHiveTable", "createHiveTableCallable"] + }, + "DeleteHiveCatalog": { + "methods": ["deleteHiveCatalog", "deleteHiveCatalog", "deleteHiveCatalog", "deleteHiveCatalogCallable"] + }, + "DeleteHiveDatabase": { + "methods": ["deleteHiveDatabase", "deleteHiveDatabase", "deleteHiveDatabase", "deleteHiveDatabaseCallable"] + }, + "DeleteHiveTable": { + "methods": ["deleteHiveTable", "deleteHiveTable", "deleteHiveTable", "deleteHiveTableCallable"] + }, + "GetHiveCatalog": { + "methods": ["getHiveCatalog", "getHiveCatalog", "getHiveCatalog", "getHiveCatalogCallable"] + }, + "GetHiveDatabase": { + "methods": ["getHiveDatabase", "getHiveDatabase", "getHiveDatabase", "getHiveDatabaseCallable"] + }, + "GetHiveTable": { + "methods": ["getHiveTable", "getHiveTable", "getHiveTable", "getHiveTableCallable"] + }, + "ListHiveCatalogs": { + "methods": ["listHiveCatalogs", "listHiveCatalogs", "listHiveCatalogs", "listHiveCatalogsPagedCallable", "listHiveCatalogsCallable"] + }, + "ListHiveDatabases": { + "methods": ["listHiveDatabases", "listHiveDatabases", "listHiveDatabases", "listHiveDatabasesPagedCallable", "listHiveDatabasesCallable"] + }, + "ListHiveTables": { + "methods": ["listHiveTables", "listHiveTables", "listHiveTables", "listHiveTablesPagedCallable", "listHiveTablesCallable"] + }, + "ListPartitions": { + "methods": ["listPartitionsCallable"] + }, + "UpdateHiveCatalog": { + "methods": 
["updateHiveCatalog", "updateHiveCatalog", "updateHiveCatalogCallable"] + }, + "UpdateHiveDatabase": { + "methods": ["updateHiveDatabase", "updateHiveDatabase", "updateHiveDatabaseCallable"] + }, + "UpdateHiveTable": { + "methods": ["updateHiveTable", "updateHiveTable", "updateHiveTableCallable"] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/package-info.java b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/package-info.java new file mode 100644 index 000000000000..b35c700d6989 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/package-info.java @@ -0,0 +1,60 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to BigLake API + * + *

The interfaces provided are listed below, along with usage samples. + * + *

======================= HiveMetastoreServiceClient ======================= + * + *

Service Description: Hive Metastore Service is a biglake service that allows users to manage + * their external Hive catalogs. Full API compatibility with OSS Hive Metastore APIs is not + * supported. The methods match the Hive Metastore API spec mostly except for a few exceptions. + * These include listing resources with pattern, environment context which are combined in a single + * List API, return of ListResponse object instead of a list of resources, transactions, locks, etc. + * + *

The BigLake Hive Metastore API defines the following resources: + * + *

    + *
  • A collection of Google Cloud projects: `/projects/*` + *
  • Each project has a collection of catalogs: `/catalogs/*` + *
  • Each catalog has a collection of databases: `/databases/*` + *
  • Each database has a collection of tables: `/tables/*` + *
+ * + *

Sample for HiveMetastoreServiceClient: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+ *     HiveMetastoreServiceClient.create()) {
+ *   ProjectName parent = ProjectName.of("[PROJECT]");
+ *   HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build();
+ *   String hiveCatalogId = "hiveCatalogId-575314556";
+ *   HiveCatalog response =
+ *       hiveMetastoreServiceClient.createHiveCatalog(parent, hiveCatalog, hiveCatalogId);
+ * }
+ * }</pre>
+ */ +@Generated("by gapic-generator-java") +package com.google.cloud.biglake.hive.v1beta; + +import javax.annotation.Generated; diff --git a/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/GrpcHiveMetastoreServiceCallableFactory.java b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/GrpcHiveMetastoreServiceCallableFactory.java new file mode 100644 index 000000000000..3c579f5837f8 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/GrpcHiveMetastoreServiceCallableFactory.java @@ -0,0 +1,115 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the HiveMetastoreService service API. + * + *

This class is for advanced usage. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class GrpcHiveMetastoreServiceCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( 
+ GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/GrpcHiveMetastoreServiceStub.java b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/GrpcHiveMetastoreServiceStub.java new file mode 100644 index 000000000000..10d037441b63 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/GrpcHiveMetastoreServiceStub.java @@ -0,0 +1,836 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.stub; + +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveCatalogsPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveDatabasesPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveTablesPagedResponse; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import 
com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse; +import com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse; +import com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the HiveMetastoreService service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class GrpcHiveMetastoreServiceStub extends HiveMetastoreServiceStub { + private static final MethodDescriptor + createHiveCatalogMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/CreateHiveCatalog") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateHiveCatalogRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(HiveCatalog.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getHiveCatalogMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/GetHiveCatalog") + .setRequestMarshaller( + ProtoUtils.marshaller(GetHiveCatalogRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(HiveCatalog.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listHiveCatalogsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/ListHiveCatalogs") + .setRequestMarshaller( + ProtoUtils.marshaller(ListHiveCatalogsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListHiveCatalogsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateHiveCatalogMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/UpdateHiveCatalog") + .setRequestMarshaller( + 
ProtoUtils.marshaller(UpdateHiveCatalogRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(HiveCatalog.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + deleteHiveCatalogMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/DeleteHiveCatalog") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteHiveCatalogRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + createHiveDatabaseMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/CreateHiveDatabase") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateHiveDatabaseRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(HiveDatabase.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getHiveDatabaseMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/GetHiveDatabase") + .setRequestMarshaller( + ProtoUtils.marshaller(GetHiveDatabaseRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(HiveDatabase.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listHiveDatabasesMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/ListHiveDatabases") + .setRequestMarshaller( + ProtoUtils.marshaller(ListHiveDatabasesRequest.getDefaultInstance())) + 
.setResponseMarshaller( + ProtoUtils.marshaller(ListHiveDatabasesResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateHiveDatabaseMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/UpdateHiveDatabase") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateHiveDatabaseRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(HiveDatabase.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + deleteHiveDatabaseMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/DeleteHiveDatabase") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteHiveDatabaseRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + createHiveTableMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/CreateHiveTable") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateHiveTableRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(HiveTable.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getHiveTableMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/GetHiveTable") + .setRequestMarshaller(ProtoUtils.marshaller(GetHiveTableRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(HiveTable.getDefaultInstance())) + 
.setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listHiveTablesMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/ListHiveTables") + .setRequestMarshaller( + ProtoUtils.marshaller(ListHiveTablesRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListHiveTablesResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateHiveTableMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/UpdateHiveTable") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateHiveTableRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(HiveTable.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + deleteHiveTableMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/DeleteHiveTable") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteHiveTableRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + batchCreatePartitionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/BatchCreatePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchCreatePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(BatchCreatePartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + 
private static final MethodDescriptor + batchDeletePartitionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/BatchDeletePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchDeletePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + batchUpdatePartitionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/BatchUpdatePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchUpdatePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(BatchUpdatePartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listPartitionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/ListPartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(ListPartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListPartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable createHiveCatalogCallable; + private final UnaryCallable getHiveCatalogCallable; + private final UnaryCallable + listHiveCatalogsCallable; + private final UnaryCallable + listHiveCatalogsPagedCallable; + private final UnaryCallable updateHiveCatalogCallable; + private final UnaryCallable deleteHiveCatalogCallable; + private final UnaryCallable createHiveDatabaseCallable; + private final UnaryCallable getHiveDatabaseCallable; + private final UnaryCallable + 
listHiveDatabasesCallable; + private final UnaryCallable + listHiveDatabasesPagedCallable; + private final UnaryCallable updateHiveDatabaseCallable; + private final UnaryCallable deleteHiveDatabaseCallable; + private final UnaryCallable createHiveTableCallable; + private final UnaryCallable getHiveTableCallable; + private final UnaryCallable listHiveTablesCallable; + private final UnaryCallable + listHiveTablesPagedCallable; + private final UnaryCallable updateHiveTableCallable; + private final UnaryCallable deleteHiveTableCallable; + private final UnaryCallable + batchCreatePartitionsCallable; + private final UnaryCallable batchDeletePartitionsCallable; + private final UnaryCallable + batchUpdatePartitionsCallable; + private final ServerStreamingCallable + listPartitionsCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcHiveMetastoreServiceStub create(HiveMetastoreServiceStubSettings settings) + throws IOException { + return new GrpcHiveMetastoreServiceStub(settings, ClientContext.create(settings)); + } + + public static final GrpcHiveMetastoreServiceStub create(ClientContext clientContext) + throws IOException { + return new GrpcHiveMetastoreServiceStub( + HiveMetastoreServiceStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcHiveMetastoreServiceStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcHiveMetastoreServiceStub( + HiveMetastoreServiceStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcHiveMetastoreServiceStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. 
+ */ + protected GrpcHiveMetastoreServiceStub( + HiveMetastoreServiceStubSettings settings, ClientContext clientContext) throws IOException { + this(settings, clientContext, new GrpcHiveMetastoreServiceCallableFactory()); + } + + /** + * Constructs an instance of GrpcHiveMetastoreServiceStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected GrpcHiveMetastoreServiceStub( + HiveMetastoreServiceStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings createHiveCatalogTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createHiveCatalogMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings getHiveCatalogTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getHiveCatalogMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + listHiveCatalogsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listHiveCatalogsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings updateHiveCatalogTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateHiveCatalogMethodDescriptor) + .setParamsExtractor( + request -> { + 
RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "hive_catalog.name", String.valueOf(request.getHiveCatalog().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings deleteHiveCatalogTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteHiveCatalogMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings createHiveDatabaseTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createHiveDatabaseMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings getHiveDatabaseTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getHiveDatabaseMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + listHiveDatabasesTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listHiveDatabasesMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings updateHiveDatabaseTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateHiveDatabaseMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "hive_database.name", String.valueOf(request.getHiveDatabase().getName())); + return builder.build(); + }) + .build(); + 
GrpcCallSettings deleteHiveDatabaseTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteHiveDatabaseMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings createHiveTableTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createHiveTableMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings getHiveTableTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getHiveTableMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + listHiveTablesTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listHiveTablesMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings updateHiveTableTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateHiveTableMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("hive_table.name", String.valueOf(request.getHiveTable().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings deleteHiveTableTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteHiveTableMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = 
RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + batchCreatePartitionsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(batchCreatePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings batchDeletePartitionsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(batchDeletePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings + batchUpdatePartitionsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(batchUpdatePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings + listPartitionsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listPartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + + this.createHiveCatalogCallable = + callableFactory.createUnaryCallable( + createHiveCatalogTransportSettings, + settings.createHiveCatalogSettings(), + clientContext); + this.getHiveCatalogCallable = + callableFactory.createUnaryCallable( + getHiveCatalogTransportSettings, settings.getHiveCatalogSettings(), clientContext); + this.listHiveCatalogsCallable = + callableFactory.createUnaryCallable( + 
listHiveCatalogsTransportSettings, settings.listHiveCatalogsSettings(), clientContext); + this.listHiveCatalogsPagedCallable = + callableFactory.createPagedCallable( + listHiveCatalogsTransportSettings, settings.listHiveCatalogsSettings(), clientContext); + this.updateHiveCatalogCallable = + callableFactory.createUnaryCallable( + updateHiveCatalogTransportSettings, + settings.updateHiveCatalogSettings(), + clientContext); + this.deleteHiveCatalogCallable = + callableFactory.createUnaryCallable( + deleteHiveCatalogTransportSettings, + settings.deleteHiveCatalogSettings(), + clientContext); + this.createHiveDatabaseCallable = + callableFactory.createUnaryCallable( + createHiveDatabaseTransportSettings, + settings.createHiveDatabaseSettings(), + clientContext); + this.getHiveDatabaseCallable = + callableFactory.createUnaryCallable( + getHiveDatabaseTransportSettings, settings.getHiveDatabaseSettings(), clientContext); + this.listHiveDatabasesCallable = + callableFactory.createUnaryCallable( + listHiveDatabasesTransportSettings, + settings.listHiveDatabasesSettings(), + clientContext); + this.listHiveDatabasesPagedCallable = + callableFactory.createPagedCallable( + listHiveDatabasesTransportSettings, + settings.listHiveDatabasesSettings(), + clientContext); + this.updateHiveDatabaseCallable = + callableFactory.createUnaryCallable( + updateHiveDatabaseTransportSettings, + settings.updateHiveDatabaseSettings(), + clientContext); + this.deleteHiveDatabaseCallable = + callableFactory.createUnaryCallable( + deleteHiveDatabaseTransportSettings, + settings.deleteHiveDatabaseSettings(), + clientContext); + this.createHiveTableCallable = + callableFactory.createUnaryCallable( + createHiveTableTransportSettings, settings.createHiveTableSettings(), clientContext); + this.getHiveTableCallable = + callableFactory.createUnaryCallable( + getHiveTableTransportSettings, settings.getHiveTableSettings(), clientContext); + this.listHiveTablesCallable = + 
callableFactory.createUnaryCallable( + listHiveTablesTransportSettings, settings.listHiveTablesSettings(), clientContext); + this.listHiveTablesPagedCallable = + callableFactory.createPagedCallable( + listHiveTablesTransportSettings, settings.listHiveTablesSettings(), clientContext); + this.updateHiveTableCallable = + callableFactory.createUnaryCallable( + updateHiveTableTransportSettings, settings.updateHiveTableSettings(), clientContext); + this.deleteHiveTableCallable = + callableFactory.createUnaryCallable( + deleteHiveTableTransportSettings, settings.deleteHiveTableSettings(), clientContext); + this.batchCreatePartitionsCallable = + callableFactory.createUnaryCallable( + batchCreatePartitionsTransportSettings, + settings.batchCreatePartitionsSettings(), + clientContext); + this.batchDeletePartitionsCallable = + callableFactory.createUnaryCallable( + batchDeletePartitionsTransportSettings, + settings.batchDeletePartitionsSettings(), + clientContext); + this.batchUpdatePartitionsCallable = + callableFactory.createUnaryCallable( + batchUpdatePartitionsTransportSettings, + settings.batchUpdatePartitionsSettings(), + clientContext); + this.listPartitionsCallable = + callableFactory.createServerStreamingCallable( + listPartitionsTransportSettings, settings.listPartitionsSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable createHiveCatalogCallable() { + return createHiveCatalogCallable; + } + + @Override + public UnaryCallable getHiveCatalogCallable() { + return getHiveCatalogCallable; + } + + @Override + public UnaryCallable + listHiveCatalogsCallable() { + return listHiveCatalogsCallable; + } + + @Override + public UnaryCallable + listHiveCatalogsPagedCallable() { + return listHiveCatalogsPagedCallable; + } + + @Override + public UnaryCallable 
updateHiveCatalogCallable() { + return updateHiveCatalogCallable; + } + + @Override + public UnaryCallable deleteHiveCatalogCallable() { + return deleteHiveCatalogCallable; + } + + @Override + public UnaryCallable createHiveDatabaseCallable() { + return createHiveDatabaseCallable; + } + + @Override + public UnaryCallable getHiveDatabaseCallable() { + return getHiveDatabaseCallable; + } + + @Override + public UnaryCallable + listHiveDatabasesCallable() { + return listHiveDatabasesCallable; + } + + @Override + public UnaryCallable + listHiveDatabasesPagedCallable() { + return listHiveDatabasesPagedCallable; + } + + @Override + public UnaryCallable updateHiveDatabaseCallable() { + return updateHiveDatabaseCallable; + } + + @Override + public UnaryCallable deleteHiveDatabaseCallable() { + return deleteHiveDatabaseCallable; + } + + @Override + public UnaryCallable createHiveTableCallable() { + return createHiveTableCallable; + } + + @Override + public UnaryCallable getHiveTableCallable() { + return getHiveTableCallable; + } + + @Override + public UnaryCallable listHiveTablesCallable() { + return listHiveTablesCallable; + } + + @Override + public UnaryCallable + listHiveTablesPagedCallable() { + return listHiveTablesPagedCallable; + } + + @Override + public UnaryCallable updateHiveTableCallable() { + return updateHiveTableCallable; + } + + @Override + public UnaryCallable deleteHiveTableCallable() { + return deleteHiveTableCallable; + } + + @Override + public UnaryCallable + batchCreatePartitionsCallable() { + return batchCreatePartitionsCallable; + } + + @Override + public UnaryCallable batchDeletePartitionsCallable() { + return batchDeletePartitionsCallable; + } + + @Override + public UnaryCallable + batchUpdatePartitionsCallable() { + return batchUpdatePartitionsCallable; + } + + @Override + public ServerStreamingCallable + listPartitionsCallable() { + return listPartitionsCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); 
+ } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HiveMetastoreServiceStub.java b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HiveMetastoreServiceStub.java new file mode 100644 index 000000000000..d809f5a0f394 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HiveMetastoreServiceStub.java @@ -0,0 +1,166 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.stub; + +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveCatalogsPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveDatabasesPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveTablesPagedResponse; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse; +import com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest; +import 
com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse; +import com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest; +import com.google.protobuf.Empty; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the HiveMetastoreService service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@BetaApi +@Generated("by gapic-generator-java") +public abstract class HiveMetastoreServiceStub implements BackgroundResource { + + public UnaryCallable createHiveCatalogCallable() { + throw new UnsupportedOperationException("Not implemented: createHiveCatalogCallable()"); + } + + public UnaryCallable getHiveCatalogCallable() { + throw new UnsupportedOperationException("Not implemented: getHiveCatalogCallable()"); + } + + public UnaryCallable + listHiveCatalogsPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listHiveCatalogsPagedCallable()"); + } + + public UnaryCallable + listHiveCatalogsCallable() { + throw new UnsupportedOperationException("Not implemented: listHiveCatalogsCallable()"); + } + + public UnaryCallable updateHiveCatalogCallable() { + throw new UnsupportedOperationException("Not implemented: updateHiveCatalogCallable()"); + } + + public UnaryCallable deleteHiveCatalogCallable() { + throw new UnsupportedOperationException("Not implemented: deleteHiveCatalogCallable()"); + } + + public UnaryCallable createHiveDatabaseCallable() { + throw new UnsupportedOperationException("Not implemented: createHiveDatabaseCallable()"); + } + + public UnaryCallable getHiveDatabaseCallable() { + throw new UnsupportedOperationException("Not implemented: getHiveDatabaseCallable()"); + } + + public UnaryCallable + listHiveDatabasesPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listHiveDatabasesPagedCallable()"); + } + + public UnaryCallable + listHiveDatabasesCallable() { + throw new UnsupportedOperationException("Not implemented: listHiveDatabasesCallable()"); + } + + public UnaryCallable updateHiveDatabaseCallable() { + throw new UnsupportedOperationException("Not implemented: updateHiveDatabaseCallable()"); + } + + public UnaryCallable deleteHiveDatabaseCallable() { + throw new UnsupportedOperationException("Not implemented: 
deleteHiveDatabaseCallable()"); + } + + public UnaryCallable createHiveTableCallable() { + throw new UnsupportedOperationException("Not implemented: createHiveTableCallable()"); + } + + public UnaryCallable getHiveTableCallable() { + throw new UnsupportedOperationException("Not implemented: getHiveTableCallable()"); + } + + public UnaryCallable + listHiveTablesPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listHiveTablesPagedCallable()"); + } + + public UnaryCallable listHiveTablesCallable() { + throw new UnsupportedOperationException("Not implemented: listHiveTablesCallable()"); + } + + public UnaryCallable updateHiveTableCallable() { + throw new UnsupportedOperationException("Not implemented: updateHiveTableCallable()"); + } + + public UnaryCallable deleteHiveTableCallable() { + throw new UnsupportedOperationException("Not implemented: deleteHiveTableCallable()"); + } + + public UnaryCallable + batchCreatePartitionsCallable() { + throw new UnsupportedOperationException("Not implemented: batchCreatePartitionsCallable()"); + } + + public UnaryCallable batchDeletePartitionsCallable() { + throw new UnsupportedOperationException("Not implemented: batchDeletePartitionsCallable()"); + } + + public UnaryCallable + batchUpdatePartitionsCallable() { + throw new UnsupportedOperationException("Not implemented: batchUpdatePartitionsCallable()"); + } + + public ServerStreamingCallable + listPartitionsCallable() { + throw new UnsupportedOperationException("Not implemented: listPartitionsCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HiveMetastoreServiceStubSettings.java b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HiveMetastoreServiceStubSettings.java new file mode 100644 index 000000000000..f6e87ce5fb8b --- /dev/null +++ 
b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HiveMetastoreServiceStubSettings.java @@ -0,0 +1,1023 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.stub; + +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveCatalogsPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveDatabasesPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveTablesPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.HttpJsonTransportChannel; +import com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import 
com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.LibraryMetadata; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.PagedListDescriptor; +import com.google.api.gax.rpc.PagedListResponseFactory; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse; +import com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest; +import 
com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse; +import com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link HiveMetastoreServiceStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (biglake.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of createHiveCatalog: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * HiveMetastoreServiceStubSettings.Builder hiveMetastoreServiceSettingsBuilder =
+ *     HiveMetastoreServiceStubSettings.newBuilder();
+ * hiveMetastoreServiceSettingsBuilder
+ *     .createHiveCatalogSettings()
+ *     .setRetrySettings(
+ *         hiveMetastoreServiceSettingsBuilder
+ *             .createHiveCatalogSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * HiveMetastoreServiceStubSettings hiveMetastoreServiceSettings =
+ *     hiveMetastoreServiceSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@BetaApi +@Generated("by gapic-generator-java") +@SuppressWarnings("CanonicalDuration") +public class HiveMetastoreServiceStubSettings + extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/bigquery") + .add("https://www.googleapis.com/auth/cloud-platform") + .build(); + + private final UnaryCallSettings createHiveCatalogSettings; + private final UnaryCallSettings getHiveCatalogSettings; + private final PagedCallSettings< + ListHiveCatalogsRequest, ListHiveCatalogsResponse, ListHiveCatalogsPagedResponse> + listHiveCatalogsSettings; + private final UnaryCallSettings updateHiveCatalogSettings; + private final UnaryCallSettings deleteHiveCatalogSettings; + private final UnaryCallSettings + createHiveDatabaseSettings; + private final UnaryCallSettings getHiveDatabaseSettings; + private final PagedCallSettings< + ListHiveDatabasesRequest, ListHiveDatabasesResponse, ListHiveDatabasesPagedResponse> + listHiveDatabasesSettings; + private final UnaryCallSettings + updateHiveDatabaseSettings; + private final UnaryCallSettings deleteHiveDatabaseSettings; + private final UnaryCallSettings createHiveTableSettings; + private final UnaryCallSettings getHiveTableSettings; + private final PagedCallSettings< + ListHiveTablesRequest, ListHiveTablesResponse, ListHiveTablesPagedResponse> + listHiveTablesSettings; + private final UnaryCallSettings updateHiveTableSettings; + private final UnaryCallSettings deleteHiveTableSettings; + private final UnaryCallSettings + batchCreatePartitionsSettings; + private final UnaryCallSettings + batchDeletePartitionsSettings; + private final UnaryCallSettings + batchUpdatePartitionsSettings; + private final ServerStreamingCallSettings + 
listPartitionsSettings; + + private static final PagedListDescriptor< + ListHiveCatalogsRequest, ListHiveCatalogsResponse, HiveCatalog> + LIST_HIVE_CATALOGS_PAGE_STR_DESC = + new PagedListDescriptor< + ListHiveCatalogsRequest, ListHiveCatalogsResponse, HiveCatalog>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListHiveCatalogsRequest injectToken( + ListHiveCatalogsRequest payload, String token) { + return ListHiveCatalogsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListHiveCatalogsRequest injectPageSize( + ListHiveCatalogsRequest payload, int pageSize) { + return ListHiveCatalogsRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListHiveCatalogsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListHiveCatalogsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListHiveCatalogsResponse payload) { + return payload.getCatalogsList(); + } + }; + + private static final PagedListDescriptor< + ListHiveDatabasesRequest, ListHiveDatabasesResponse, HiveDatabase> + LIST_HIVE_DATABASES_PAGE_STR_DESC = + new PagedListDescriptor< + ListHiveDatabasesRequest, ListHiveDatabasesResponse, HiveDatabase>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListHiveDatabasesRequest injectToken( + ListHiveDatabasesRequest payload, String token) { + return ListHiveDatabasesRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListHiveDatabasesRequest injectPageSize( + ListHiveDatabasesRequest payload, int pageSize) { + return ListHiveDatabasesRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListHiveDatabasesRequest payload) { + return payload.getPageSize(); + } + + @Override + public String 
extractNextToken(ListHiveDatabasesResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListHiveDatabasesResponse payload) { + return payload.getDatabasesList(); + } + }; + + private static final PagedListDescriptor + LIST_HIVE_TABLES_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListHiveTablesRequest injectToken(ListHiveTablesRequest payload, String token) { + return ListHiveTablesRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListHiveTablesRequest injectPageSize( + ListHiveTablesRequest payload, int pageSize) { + return ListHiveTablesRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListHiveTablesRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListHiveTablesResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListHiveTablesResponse payload) { + return payload.getTablesList(); + } + }; + + private static final PagedListResponseFactory< + ListHiveCatalogsRequest, ListHiveCatalogsResponse, ListHiveCatalogsPagedResponse> + LIST_HIVE_CATALOGS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListHiveCatalogsRequest, ListHiveCatalogsResponse, ListHiveCatalogsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListHiveCatalogsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext + pageContext = + PageContext.create( + callable, LIST_HIVE_CATALOGS_PAGE_STR_DESC, request, context); + return ListHiveCatalogsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListHiveDatabasesRequest, ListHiveDatabasesResponse, ListHiveDatabasesPagedResponse> + LIST_HIVE_DATABASES_PAGE_STR_FACT = + new 
PagedListResponseFactory< + ListHiveDatabasesRequest, + ListHiveDatabasesResponse, + ListHiveDatabasesPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListHiveDatabasesRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext + pageContext = + PageContext.create( + callable, LIST_HIVE_DATABASES_PAGE_STR_DESC, request, context); + return ListHiveDatabasesPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListHiveTablesRequest, ListHiveTablesResponse, ListHiveTablesPagedResponse> + LIST_HIVE_TABLES_PAGE_STR_FACT = + new PagedListResponseFactory< + ListHiveTablesRequest, ListHiveTablesResponse, ListHiveTablesPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListHiveTablesRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_HIVE_TABLES_PAGE_STR_DESC, request, context); + return ListHiveTablesPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + /** Returns the object with the settings used for calls to createHiveCatalog. */ + public UnaryCallSettings createHiveCatalogSettings() { + return createHiveCatalogSettings; + } + + /** Returns the object with the settings used for calls to getHiveCatalog. */ + public UnaryCallSettings getHiveCatalogSettings() { + return getHiveCatalogSettings; + } + + /** Returns the object with the settings used for calls to listHiveCatalogs. */ + public PagedCallSettings< + ListHiveCatalogsRequest, ListHiveCatalogsResponse, ListHiveCatalogsPagedResponse> + listHiveCatalogsSettings() { + return listHiveCatalogsSettings; + } + + /** Returns the object with the settings used for calls to updateHiveCatalog. 
*/ + public UnaryCallSettings updateHiveCatalogSettings() { + return updateHiveCatalogSettings; + } + + /** Returns the object with the settings used for calls to deleteHiveCatalog. */ + public UnaryCallSettings deleteHiveCatalogSettings() { + return deleteHiveCatalogSettings; + } + + /** Returns the object with the settings used for calls to createHiveDatabase. */ + public UnaryCallSettings createHiveDatabaseSettings() { + return createHiveDatabaseSettings; + } + + /** Returns the object with the settings used for calls to getHiveDatabase. */ + public UnaryCallSettings getHiveDatabaseSettings() { + return getHiveDatabaseSettings; + } + + /** Returns the object with the settings used for calls to listHiveDatabases. */ + public PagedCallSettings< + ListHiveDatabasesRequest, ListHiveDatabasesResponse, ListHiveDatabasesPagedResponse> + listHiveDatabasesSettings() { + return listHiveDatabasesSettings; + } + + /** Returns the object with the settings used for calls to updateHiveDatabase. */ + public UnaryCallSettings updateHiveDatabaseSettings() { + return updateHiveDatabaseSettings; + } + + /** Returns the object with the settings used for calls to deleteHiveDatabase. */ + public UnaryCallSettings deleteHiveDatabaseSettings() { + return deleteHiveDatabaseSettings; + } + + /** Returns the object with the settings used for calls to createHiveTable. */ + public UnaryCallSettings createHiveTableSettings() { + return createHiveTableSettings; + } + + /** Returns the object with the settings used for calls to getHiveTable. */ + public UnaryCallSettings getHiveTableSettings() { + return getHiveTableSettings; + } + + /** Returns the object with the settings used for calls to listHiveTables. */ + public PagedCallSettings< + ListHiveTablesRequest, ListHiveTablesResponse, ListHiveTablesPagedResponse> + listHiveTablesSettings() { + return listHiveTablesSettings; + } + + /** Returns the object with the settings used for calls to updateHiveTable. 
*/ + public UnaryCallSettings updateHiveTableSettings() { + return updateHiveTableSettings; + } + + /** Returns the object with the settings used for calls to deleteHiveTable. */ + public UnaryCallSettings deleteHiveTableSettings() { + return deleteHiveTableSettings; + } + + /** Returns the object with the settings used for calls to batchCreatePartitions. */ + public UnaryCallSettings + batchCreatePartitionsSettings() { + return batchCreatePartitionsSettings; + } + + /** Returns the object with the settings used for calls to batchDeletePartitions. */ + public UnaryCallSettings batchDeletePartitionsSettings() { + return batchDeletePartitionsSettings; + } + + /** Returns the object with the settings used for calls to batchUpdatePartitions. */ + public UnaryCallSettings + batchUpdatePartitionsSettings() { + return batchUpdatePartitionsSettings; + } + + /** Returns the object with the settings used for calls to listPartitions. */ + public ServerStreamingCallSettings + listPartitionsSettings() { + return listPartitionsSettings; + } + + public HiveMetastoreServiceStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcHiveMetastoreServiceStub.create(this); + } + if (getTransportChannelProvider() + .getTransportName() + .equals(HttpJsonTransportChannel.getHttpJsonTransportName())) { + return HttpJsonHiveMetastoreServiceStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "biglake"; + } + + /** Returns a builder for the default ExecutorProvider for this service. 
*/ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "biglake.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "biglake.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default gRPC ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + /** Returns a builder for the default REST ChannelProvider for this service. 
*/ + @BetaApi + public static InstantiatingHttpJsonChannelProvider.Builder + defaultHttpJsonTransportProviderBuilder() { + return InstantiatingHttpJsonChannelProvider.newBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultGrpcApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(HiveMetastoreServiceStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultHttpJsonApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(HiveMetastoreServiceStubSettings.class)) + .setTransportToken( + GaxHttpJsonProperties.getHttpJsonTokenName(), + GaxHttpJsonProperties.getHttpJsonVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return HiveMetastoreServiceStubSettings.defaultGrpcApiClientHeaderProviderBuilder(); + } + + /** Returns a new gRPC builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new REST builder for this class. */ + public static Builder newHttpJsonBuilder() { + return Builder.createHttpJsonDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. 
*/ + public Builder toBuilder() { + return new Builder(this); + } + + protected HiveMetastoreServiceStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createHiveCatalogSettings = settingsBuilder.createHiveCatalogSettings().build(); + getHiveCatalogSettings = settingsBuilder.getHiveCatalogSettings().build(); + listHiveCatalogsSettings = settingsBuilder.listHiveCatalogsSettings().build(); + updateHiveCatalogSettings = settingsBuilder.updateHiveCatalogSettings().build(); + deleteHiveCatalogSettings = settingsBuilder.deleteHiveCatalogSettings().build(); + createHiveDatabaseSettings = settingsBuilder.createHiveDatabaseSettings().build(); + getHiveDatabaseSettings = settingsBuilder.getHiveDatabaseSettings().build(); + listHiveDatabasesSettings = settingsBuilder.listHiveDatabasesSettings().build(); + updateHiveDatabaseSettings = settingsBuilder.updateHiveDatabaseSettings().build(); + deleteHiveDatabaseSettings = settingsBuilder.deleteHiveDatabaseSettings().build(); + createHiveTableSettings = settingsBuilder.createHiveTableSettings().build(); + getHiveTableSettings = settingsBuilder.getHiveTableSettings().build(); + listHiveTablesSettings = settingsBuilder.listHiveTablesSettings().build(); + updateHiveTableSettings = settingsBuilder.updateHiveTableSettings().build(); + deleteHiveTableSettings = settingsBuilder.deleteHiveTableSettings().build(); + batchCreatePartitionsSettings = settingsBuilder.batchCreatePartitionsSettings().build(); + batchDeletePartitionsSettings = settingsBuilder.batchDeletePartitionsSettings().build(); + batchUpdatePartitionsSettings = settingsBuilder.batchUpdatePartitionsSettings().build(); + listPartitionsSettings = settingsBuilder.listPartitionsSettings().build(); + } + + @Override + protected LibraryMetadata getLibraryMetadata() { + return LibraryMetadata.newBuilder() + .setArtifactName("com.google.cloud:google-cloud-biglake") + .setRepository("googleapis/google-cloud-java") + .build(); + } + + /** Builder for 
HiveMetastoreServiceStubSettings. */ + public static class Builder + extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder + createHiveCatalogSettings; + private final UnaryCallSettings.Builder + getHiveCatalogSettings; + private final PagedCallSettings.Builder< + ListHiveCatalogsRequest, ListHiveCatalogsResponse, ListHiveCatalogsPagedResponse> + listHiveCatalogsSettings; + private final UnaryCallSettings.Builder + updateHiveCatalogSettings; + private final UnaryCallSettings.Builder + deleteHiveCatalogSettings; + private final UnaryCallSettings.Builder + createHiveDatabaseSettings; + private final UnaryCallSettings.Builder + getHiveDatabaseSettings; + private final PagedCallSettings.Builder< + ListHiveDatabasesRequest, ListHiveDatabasesResponse, ListHiveDatabasesPagedResponse> + listHiveDatabasesSettings; + private final UnaryCallSettings.Builder + updateHiveDatabaseSettings; + private final UnaryCallSettings.Builder + deleteHiveDatabaseSettings; + private final UnaryCallSettings.Builder + createHiveTableSettings; + private final UnaryCallSettings.Builder getHiveTableSettings; + private final PagedCallSettings.Builder< + ListHiveTablesRequest, ListHiveTablesResponse, ListHiveTablesPagedResponse> + listHiveTablesSettings; + private final UnaryCallSettings.Builder + updateHiveTableSettings; + private final UnaryCallSettings.Builder deleteHiveTableSettings; + private final UnaryCallSettings.Builder< + BatchCreatePartitionsRequest, BatchCreatePartitionsResponse> + batchCreatePartitionsSettings; + private final UnaryCallSettings.Builder + batchDeletePartitionsSettings; + private final UnaryCallSettings.Builder< + BatchUpdatePartitionsRequest, BatchUpdatePartitionsResponse> + batchUpdatePartitionsSettings; + private final ServerStreamingCallSettings.Builder + listPartitionsSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> 
definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_0_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(10000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setTotalTimeoutDuration(Duration.ofMillis(60000L)) + .build(); + definitions.put("retry_policy_0_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createHiveCatalogSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getHiveCatalogSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listHiveCatalogsSettings = PagedCallSettings.newBuilder(LIST_HIVE_CATALOGS_PAGE_STR_FACT); + updateHiveCatalogSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteHiveCatalogSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createHiveDatabaseSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getHiveDatabaseSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listHiveDatabasesSettings = PagedCallSettings.newBuilder(LIST_HIVE_DATABASES_PAGE_STR_FACT); + updateHiveDatabaseSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteHiveDatabaseSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createHiveTableSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getHiveTableSettings = 
UnaryCallSettings.newUnaryCallSettingsBuilder(); + listHiveTablesSettings = PagedCallSettings.newBuilder(LIST_HIVE_TABLES_PAGE_STR_FACT); + updateHiveTableSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteHiveTableSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchCreatePartitionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchDeletePartitionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchUpdatePartitionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listPartitionsSettings = ServerStreamingCallSettings.newBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createHiveCatalogSettings, + getHiveCatalogSettings, + listHiveCatalogsSettings, + updateHiveCatalogSettings, + deleteHiveCatalogSettings, + createHiveDatabaseSettings, + getHiveDatabaseSettings, + listHiveDatabasesSettings, + updateHiveDatabaseSettings, + deleteHiveDatabaseSettings, + createHiveTableSettings, + getHiveTableSettings, + listHiveTablesSettings, + updateHiveTableSettings, + deleteHiveTableSettings, + batchCreatePartitionsSettings, + batchDeletePartitionsSettings, + batchUpdatePartitionsSettings); + initDefaults(this); + } + + protected Builder(HiveMetastoreServiceStubSettings settings) { + super(settings); + + createHiveCatalogSettings = settings.createHiveCatalogSettings.toBuilder(); + getHiveCatalogSettings = settings.getHiveCatalogSettings.toBuilder(); + listHiveCatalogsSettings = settings.listHiveCatalogsSettings.toBuilder(); + updateHiveCatalogSettings = settings.updateHiveCatalogSettings.toBuilder(); + deleteHiveCatalogSettings = settings.deleteHiveCatalogSettings.toBuilder(); + createHiveDatabaseSettings = settings.createHiveDatabaseSettings.toBuilder(); + getHiveDatabaseSettings = settings.getHiveDatabaseSettings.toBuilder(); + listHiveDatabasesSettings = settings.listHiveDatabasesSettings.toBuilder(); + updateHiveDatabaseSettings = settings.updateHiveDatabaseSettings.toBuilder(); + 
deleteHiveDatabaseSettings = settings.deleteHiveDatabaseSettings.toBuilder(); + createHiveTableSettings = settings.createHiveTableSettings.toBuilder(); + getHiveTableSettings = settings.getHiveTableSettings.toBuilder(); + listHiveTablesSettings = settings.listHiveTablesSettings.toBuilder(); + updateHiveTableSettings = settings.updateHiveTableSettings.toBuilder(); + deleteHiveTableSettings = settings.deleteHiveTableSettings.toBuilder(); + batchCreatePartitionsSettings = settings.batchCreatePartitionsSettings.toBuilder(); + batchDeletePartitionsSettings = settings.batchDeletePartitionsSettings.toBuilder(); + batchUpdatePartitionsSettings = settings.batchUpdatePartitionsSettings.toBuilder(); + listPartitionsSettings = settings.listPartitionsSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createHiveCatalogSettings, + getHiveCatalogSettings, + listHiveCatalogsSettings, + updateHiveCatalogSettings, + deleteHiveCatalogSettings, + createHiveDatabaseSettings, + getHiveDatabaseSettings, + listHiveDatabasesSettings, + updateHiveDatabaseSettings, + deleteHiveDatabaseSettings, + createHiveTableSettings, + getHiveTableSettings, + listHiveTablesSettings, + updateHiveTableSettings, + deleteHiveTableSettings, + batchCreatePartitionsSettings, + batchDeletePartitionsSettings, + batchUpdatePartitionsSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder createHttpJsonDefault() { + Builder builder = new Builder(((ClientContext) null)); + + 
builder.setTransportChannelProvider(defaultHttpJsonTransportProviderBuilder().build()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultHttpJsonApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .createHiveCatalogSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getHiveCatalogSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listHiveCatalogsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateHiveCatalogSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .deleteHiveCatalogSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .createHiveDatabaseSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getHiveDatabaseSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listHiveDatabasesSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + 
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateHiveDatabaseSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .deleteHiveDatabaseSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .createHiveTableSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getHiveTableSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listHiveTablesSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateHiveTableSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .deleteHiveTableSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .batchCreatePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .batchDeletePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .batchUpdatePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + 
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listPartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createHiveCatalog. */ + public UnaryCallSettings.Builder + createHiveCatalogSettings() { + return createHiveCatalogSettings; + } + + /** Returns the builder for the settings used for calls to getHiveCatalog. */ + public UnaryCallSettings.Builder getHiveCatalogSettings() { + return getHiveCatalogSettings; + } + + /** Returns the builder for the settings used for calls to listHiveCatalogs. */ + public PagedCallSettings.Builder< + ListHiveCatalogsRequest, ListHiveCatalogsResponse, ListHiveCatalogsPagedResponse> + listHiveCatalogsSettings() { + return listHiveCatalogsSettings; + } + + /** Returns the builder for the settings used for calls to updateHiveCatalog. */ + public UnaryCallSettings.Builder + updateHiveCatalogSettings() { + return updateHiveCatalogSettings; + } + + /** Returns the builder for the settings used for calls to deleteHiveCatalog. */ + public UnaryCallSettings.Builder deleteHiveCatalogSettings() { + return deleteHiveCatalogSettings; + } + + /** Returns the builder for the settings used for calls to createHiveDatabase. */ + public UnaryCallSettings.Builder + createHiveDatabaseSettings() { + return createHiveDatabaseSettings; + } + + /** Returns the builder for the settings used for calls to getHiveDatabase. */ + public UnaryCallSettings.Builder + getHiveDatabaseSettings() { + return getHiveDatabaseSettings; + } + + /** Returns the builder for the settings used for calls to listHiveDatabases. 
*/ + public PagedCallSettings.Builder< + ListHiveDatabasesRequest, ListHiveDatabasesResponse, ListHiveDatabasesPagedResponse> + listHiveDatabasesSettings() { + return listHiveDatabasesSettings; + } + + /** Returns the builder for the settings used for calls to updateHiveDatabase. */ + public UnaryCallSettings.Builder + updateHiveDatabaseSettings() { + return updateHiveDatabaseSettings; + } + + /** Returns the builder for the settings used for calls to deleteHiveDatabase. */ + public UnaryCallSettings.Builder + deleteHiveDatabaseSettings() { + return deleteHiveDatabaseSettings; + } + + /** Returns the builder for the settings used for calls to createHiveTable. */ + public UnaryCallSettings.Builder createHiveTableSettings() { + return createHiveTableSettings; + } + + /** Returns the builder for the settings used for calls to getHiveTable. */ + public UnaryCallSettings.Builder getHiveTableSettings() { + return getHiveTableSettings; + } + + /** Returns the builder for the settings used for calls to listHiveTables. */ + public PagedCallSettings.Builder< + ListHiveTablesRequest, ListHiveTablesResponse, ListHiveTablesPagedResponse> + listHiveTablesSettings() { + return listHiveTablesSettings; + } + + /** Returns the builder for the settings used for calls to updateHiveTable. */ + public UnaryCallSettings.Builder updateHiveTableSettings() { + return updateHiveTableSettings; + } + + /** Returns the builder for the settings used for calls to deleteHiveTable. */ + public UnaryCallSettings.Builder deleteHiveTableSettings() { + return deleteHiveTableSettings; + } + + /** Returns the builder for the settings used for calls to batchCreatePartitions. */ + public UnaryCallSettings.Builder + batchCreatePartitionsSettings() { + return batchCreatePartitionsSettings; + } + + /** Returns the builder for the settings used for calls to batchDeletePartitions. 
*/ + public UnaryCallSettings.Builder + batchDeletePartitionsSettings() { + return batchDeletePartitionsSettings; + } + + /** Returns the builder for the settings used for calls to batchUpdatePartitions. */ + public UnaryCallSettings.Builder + batchUpdatePartitionsSettings() { + return batchUpdatePartitionsSettings; + } + + /** Returns the builder for the settings used for calls to listPartitions. */ + public ServerStreamingCallSettings.Builder + listPartitionsSettings() { + return listPartitionsSettings; + } + + @Override + public HiveMetastoreServiceStubSettings build() throws IOException { + return new HiveMetastoreServiceStubSettings(this); + } + } +} diff --git a/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HttpJsonHiveMetastoreServiceCallableFactory.java b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HttpJsonHiveMetastoreServiceCallableFactory.java new file mode 100644 index 000000000000..26d4a1041342 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HttpJsonHiveMetastoreServiceCallableFactory.java @@ -0,0 +1,103 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonCallableFactory; +import com.google.api.gax.httpjson.HttpJsonOperationSnapshotCallable; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.longrunning.stub.OperationsStub; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST callable factory implementation for the HiveMetastoreService service API. + * + *

This class is for advanced usage. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class HttpJsonHiveMetastoreServiceCallableFactory + implements HttpJsonStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + HttpJsonCallSettings httpJsonCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createUnaryCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + HttpJsonCallSettings httpJsonCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createPagedCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + HttpJsonCallSettings httpJsonCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createBatchingCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + HttpJsonCallSettings httpJsonCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + UnaryCallable innerCallable = + HttpJsonCallableFactory.createBaseUnaryCallable( + httpJsonCallSettings, callSettings.getInitialCallSettings(), clientContext); + HttpJsonOperationSnapshotCallable initialCallable = + new HttpJsonOperationSnapshotCallable( + innerCallable, + httpJsonCallSettings.getMethodDescriptor().getOperationSnapshotFactory()); + return HttpJsonCallableFactory.createOperationCallable( + callSettings, clientContext, operationsStub.longRunningClient(), initialCallable); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + HttpJsonCallSettings httpJsonCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return 
HttpJsonCallableFactory.createServerStreamingCallable( + httpJsonCallSettings, callSettings, clientContext); + } +} diff --git a/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HttpJsonHiveMetastoreServiceStub.java b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HttpJsonHiveMetastoreServiceStub.java new file mode 100644 index 000000000000..f36b56ad5856 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/main/java/com/google/cloud/biglake/hive/v1beta/stub/HttpJsonHiveMetastoreServiceStub.java @@ -0,0 +1,1373 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.stub; + +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveCatalogsPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveDatabasesPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveTablesPagedResponse; + +import com.google.api.core.BetaApi; +import com.google.api.core.InternalApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.httpjson.ApiMethodDescriptor; +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.ProtoMessageRequestFormatter; +import com.google.api.gax.httpjson.ProtoMessageResponseParser; +import com.google.api.gax.httpjson.ProtoRestSerializer; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest; +import 
com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse; +import com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse; +import com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest; +import com.google.protobuf.Empty; +import com.google.protobuf.TypeRegistry; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST stub implementation for the HiveMetastoreService service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class HttpJsonHiveMetastoreServiceStub extends HiveMetastoreServiceStub { + private static final TypeRegistry typeRegistry = TypeRegistry.newBuilder().build(); + + private static final ApiMethodDescriptor + createHiveCatalogMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/CreateHiveCatalog") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{parent=projects/*}/catalogs", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam( + fields, "hiveCatalogId", request.getHiveCatalogId()); + serializer.putQueryParam( + fields, "primaryLocation", request.getPrimaryLocation()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("hiveCatalog", request.getHiveCatalog(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(HiveCatalog.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getHiveCatalogMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/GetHiveCatalog") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + 
ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{name=projects/*/catalogs/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(HiveCatalog.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + listHiveCatalogsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/ListHiveCatalogs") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{parent=projects/*}/catalogs", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListHiveCatalogsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) 
+ .build()) + .build(); + + private static final ApiMethodDescriptor + updateHiveCatalogMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/UpdateHiveCatalog") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{hiveCatalog.name=projects/*/catalogs/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, "hiveCatalog.name", request.getHiveCatalog().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("hiveCatalog", request.getHiveCatalog(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(HiveCatalog.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + deleteHiveCatalogMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/DeleteHiveCatalog") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{name=projects/*/catalogs/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = 
new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + createHiveDatabaseMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/CreateHiveDatabase") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{parent=projects/*/catalogs/*}/databases", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam( + fields, "hiveDatabaseId", request.getHiveDatabaseId()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("hiveDatabase", request.getHiveDatabase(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(HiveDatabase.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getHiveDatabaseMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/GetHiveDatabase") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + 
.setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{name=projects/*/catalogs/*/databases/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(HiveDatabase.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + listHiveDatabasesMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/ListHiveDatabases") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{parent=projects/*/catalogs/*}/databases", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + 
.setDefaultInstance(ListHiveDatabasesResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + updateHiveDatabaseMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/UpdateHiveDatabase") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{hiveDatabase.name=projects/*/catalogs/*/databases/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, "hiveDatabase.name", request.getHiveDatabase().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("hiveDatabase", request.getHiveDatabase(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(HiveDatabase.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + deleteHiveDatabaseMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/DeleteHiveDatabase") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{name=projects/*/catalogs/*/databases/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + 
serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + createHiveTableMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/CreateHiveTable") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*}/tables", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam( + fields, "hiveTableId", request.getHiveTableId()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("hiveTable", request.getHiveTable(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(HiveTable.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getHiveTableMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + 
"google.cloud.biglake.hive.v1beta.HiveMetastoreService/GetHiveTable") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{name=projects/*/catalogs/*/databases/*/tables/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(HiveTable.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + listHiveTablesMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/ListHiveTables") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*}/tables", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + 
.setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListHiveTablesResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + updateHiveTableMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/UpdateHiveTable") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{hiveTable.name=projects/*/catalogs/*/databases/*/tables/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, "hiveTable.name", request.getHiveTable().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("hiveTable", request.getHiveTable(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(HiveTable.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + deleteHiveTableMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/DeleteHiveTable") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{name=projects/*/catalogs/*/databases/*/tables/*}", + request 
-> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor< + BatchCreatePartitionsRequest, BatchCreatePartitionsResponse> + batchCreatePartitionsMethodDescriptor = + ApiMethodDescriptor + .newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/BatchCreatePartitions") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*/tables/*}/partitions:batchCreate", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearParent().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(BatchCreatePartitionsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private 
static final ApiMethodDescriptor + batchDeletePartitionsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/BatchDeletePartitions") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*/tables/*}/partitions:batchDelete", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearParent().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor< + BatchUpdatePartitionsRequest, BatchUpdatePartitionsResponse> + batchUpdatePartitionsMethodDescriptor = + ApiMethodDescriptor + .newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/BatchUpdatePartitions") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*/tables/*}/partitions:batchUpdate", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + 
request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearParent().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(BatchUpdatePartitionsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + listPartitionsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.biglake.hive.v1beta.HiveMetastoreService/ListPartitions") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.SERVER_STREAMING) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*/tables/*}/partitions:list", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "filter", request.getFilter()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListPartitionsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private final UnaryCallable createHiveCatalogCallable; + private final UnaryCallable getHiveCatalogCallable; + private final UnaryCallable + listHiveCatalogsCallable; + private final UnaryCallable + 
listHiveCatalogsPagedCallable; + private final UnaryCallable updateHiveCatalogCallable; + private final UnaryCallable deleteHiveCatalogCallable; + private final UnaryCallable createHiveDatabaseCallable; + private final UnaryCallable getHiveDatabaseCallable; + private final UnaryCallable + listHiveDatabasesCallable; + private final UnaryCallable + listHiveDatabasesPagedCallable; + private final UnaryCallable updateHiveDatabaseCallable; + private final UnaryCallable deleteHiveDatabaseCallable; + private final UnaryCallable createHiveTableCallable; + private final UnaryCallable getHiveTableCallable; + private final UnaryCallable listHiveTablesCallable; + private final UnaryCallable + listHiveTablesPagedCallable; + private final UnaryCallable updateHiveTableCallable; + private final UnaryCallable deleteHiveTableCallable; + private final UnaryCallable + batchCreatePartitionsCallable; + private final UnaryCallable batchDeletePartitionsCallable; + private final UnaryCallable + batchUpdatePartitionsCallable; + private final ServerStreamingCallable + listPartitionsCallable; + + private final BackgroundResource backgroundResources; + private final HttpJsonStubCallableFactory callableFactory; + + public static final HttpJsonHiveMetastoreServiceStub create( + HiveMetastoreServiceStubSettings settings) throws IOException { + return new HttpJsonHiveMetastoreServiceStub(settings, ClientContext.create(settings)); + } + + public static final HttpJsonHiveMetastoreServiceStub create(ClientContext clientContext) + throws IOException { + return new HttpJsonHiveMetastoreServiceStub( + HiveMetastoreServiceStubSettings.newHttpJsonBuilder().build(), clientContext); + } + + public static final HttpJsonHiveMetastoreServiceStub create( + ClientContext clientContext, HttpJsonStubCallableFactory callableFactory) throws IOException { + return new HttpJsonHiveMetastoreServiceStub( + HiveMetastoreServiceStubSettings.newHttpJsonBuilder().build(), + clientContext, + callableFactory); + } + + /** + * 
Constructs an instance of HttpJsonHiveMetastoreServiceStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected HttpJsonHiveMetastoreServiceStub( + HiveMetastoreServiceStubSettings settings, ClientContext clientContext) throws IOException { + this(settings, clientContext, new HttpJsonHiveMetastoreServiceCallableFactory()); + } + + /** + * Constructs an instance of HttpJsonHiveMetastoreServiceStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected HttpJsonHiveMetastoreServiceStub( + HiveMetastoreServiceStubSettings settings, + ClientContext clientContext, + HttpJsonStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + + HttpJsonCallSettings createHiveCatalogTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createHiveCatalogMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings getHiveCatalogTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getHiveCatalogMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + listHiveCatalogsTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(listHiveCatalogsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + 
builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings updateHiveCatalogTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateHiveCatalogMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "hive_catalog.name", String.valueOf(request.getHiveCatalog().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings deleteHiveCatalogTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteHiveCatalogMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + createHiveDatabaseTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createHiveDatabaseMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings getHiveDatabaseTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getHiveDatabaseMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + listHiveDatabasesTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(listHiveDatabasesMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + 
builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + updateHiveDatabaseTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateHiveDatabaseMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "hive_database.name", + String.valueOf(request.getHiveDatabase().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings deleteHiveDatabaseTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteHiveDatabaseMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings createHiveTableTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createHiveTableMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings getHiveTableTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getHiveTableMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + listHiveTablesTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(listHiveTablesMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + 
builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings updateHiveTableTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateHiveTableMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("hive_table.name", String.valueOf(request.getHiveTable().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings deleteHiveTableTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteHiveTableMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + batchCreatePartitionsTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(batchCreatePartitionsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + batchDeletePartitionsTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(batchDeletePartitionsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + batchUpdatePartitionsTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(batchUpdatePartitionsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = 
RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + listPartitionsTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(listPartitionsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + + this.createHiveCatalogCallable = + callableFactory.createUnaryCallable( + createHiveCatalogTransportSettings, + settings.createHiveCatalogSettings(), + clientContext); + this.getHiveCatalogCallable = + callableFactory.createUnaryCallable( + getHiveCatalogTransportSettings, settings.getHiveCatalogSettings(), clientContext); + this.listHiveCatalogsCallable = + callableFactory.createUnaryCallable( + listHiveCatalogsTransportSettings, settings.listHiveCatalogsSettings(), clientContext); + this.listHiveCatalogsPagedCallable = + callableFactory.createPagedCallable( + listHiveCatalogsTransportSettings, settings.listHiveCatalogsSettings(), clientContext); + this.updateHiveCatalogCallable = + callableFactory.createUnaryCallable( + updateHiveCatalogTransportSettings, + settings.updateHiveCatalogSettings(), + clientContext); + this.deleteHiveCatalogCallable = + callableFactory.createUnaryCallable( + deleteHiveCatalogTransportSettings, + settings.deleteHiveCatalogSettings(), + clientContext); + this.createHiveDatabaseCallable = + callableFactory.createUnaryCallable( + createHiveDatabaseTransportSettings, + settings.createHiveDatabaseSettings(), + clientContext); + this.getHiveDatabaseCallable = + callableFactory.createUnaryCallable( + getHiveDatabaseTransportSettings, settings.getHiveDatabaseSettings(), clientContext); + this.listHiveDatabasesCallable = + callableFactory.createUnaryCallable( + listHiveDatabasesTransportSettings, + 
settings.listHiveDatabasesSettings(), + clientContext); + this.listHiveDatabasesPagedCallable = + callableFactory.createPagedCallable( + listHiveDatabasesTransportSettings, + settings.listHiveDatabasesSettings(), + clientContext); + this.updateHiveDatabaseCallable = + callableFactory.createUnaryCallable( + updateHiveDatabaseTransportSettings, + settings.updateHiveDatabaseSettings(), + clientContext); + this.deleteHiveDatabaseCallable = + callableFactory.createUnaryCallable( + deleteHiveDatabaseTransportSettings, + settings.deleteHiveDatabaseSettings(), + clientContext); + this.createHiveTableCallable = + callableFactory.createUnaryCallable( + createHiveTableTransportSettings, settings.createHiveTableSettings(), clientContext); + this.getHiveTableCallable = + callableFactory.createUnaryCallable( + getHiveTableTransportSettings, settings.getHiveTableSettings(), clientContext); + this.listHiveTablesCallable = + callableFactory.createUnaryCallable( + listHiveTablesTransportSettings, settings.listHiveTablesSettings(), clientContext); + this.listHiveTablesPagedCallable = + callableFactory.createPagedCallable( + listHiveTablesTransportSettings, settings.listHiveTablesSettings(), clientContext); + this.updateHiveTableCallable = + callableFactory.createUnaryCallable( + updateHiveTableTransportSettings, settings.updateHiveTableSettings(), clientContext); + this.deleteHiveTableCallable = + callableFactory.createUnaryCallable( + deleteHiveTableTransportSettings, settings.deleteHiveTableSettings(), clientContext); + this.batchCreatePartitionsCallable = + callableFactory.createUnaryCallable( + batchCreatePartitionsTransportSettings, + settings.batchCreatePartitionsSettings(), + clientContext); + this.batchDeletePartitionsCallable = + callableFactory.createUnaryCallable( + batchDeletePartitionsTransportSettings, + settings.batchDeletePartitionsSettings(), + clientContext); + this.batchUpdatePartitionsCallable = + callableFactory.createUnaryCallable( + 
batchUpdatePartitionsTransportSettings, + settings.batchUpdatePartitionsSettings(), + clientContext); + this.listPartitionsCallable = + callableFactory.createServerStreamingCallable( + listPartitionsTransportSettings, settings.listPartitionsSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + @InternalApi + public static List getMethodDescriptors() { + List methodDescriptors = new ArrayList<>(); + methodDescriptors.add(createHiveCatalogMethodDescriptor); + methodDescriptors.add(getHiveCatalogMethodDescriptor); + methodDescriptors.add(listHiveCatalogsMethodDescriptor); + methodDescriptors.add(updateHiveCatalogMethodDescriptor); + methodDescriptors.add(deleteHiveCatalogMethodDescriptor); + methodDescriptors.add(createHiveDatabaseMethodDescriptor); + methodDescriptors.add(getHiveDatabaseMethodDescriptor); + methodDescriptors.add(listHiveDatabasesMethodDescriptor); + methodDescriptors.add(updateHiveDatabaseMethodDescriptor); + methodDescriptors.add(deleteHiveDatabaseMethodDescriptor); + methodDescriptors.add(createHiveTableMethodDescriptor); + methodDescriptors.add(getHiveTableMethodDescriptor); + methodDescriptors.add(listHiveTablesMethodDescriptor); + methodDescriptors.add(updateHiveTableMethodDescriptor); + methodDescriptors.add(deleteHiveTableMethodDescriptor); + methodDescriptors.add(batchCreatePartitionsMethodDescriptor); + methodDescriptors.add(batchDeletePartitionsMethodDescriptor); + methodDescriptors.add(batchUpdatePartitionsMethodDescriptor); + methodDescriptors.add(listPartitionsMethodDescriptor); + return methodDescriptors; + } + + @Override + public UnaryCallable createHiveCatalogCallable() { + return createHiveCatalogCallable; + } + + @Override + public UnaryCallable getHiveCatalogCallable() { + return getHiveCatalogCallable; + } + + @Override + public UnaryCallable + listHiveCatalogsCallable() { + return listHiveCatalogsCallable; + } + + @Override + public 
UnaryCallable + listHiveCatalogsPagedCallable() { + return listHiveCatalogsPagedCallable; + } + + @Override + public UnaryCallable updateHiveCatalogCallable() { + return updateHiveCatalogCallable; + } + + @Override + public UnaryCallable deleteHiveCatalogCallable() { + return deleteHiveCatalogCallable; + } + + @Override + public UnaryCallable createHiveDatabaseCallable() { + return createHiveDatabaseCallable; + } + + @Override + public UnaryCallable getHiveDatabaseCallable() { + return getHiveDatabaseCallable; + } + + @Override + public UnaryCallable + listHiveDatabasesCallable() { + return listHiveDatabasesCallable; + } + + @Override + public UnaryCallable + listHiveDatabasesPagedCallable() { + return listHiveDatabasesPagedCallable; + } + + @Override + public UnaryCallable updateHiveDatabaseCallable() { + return updateHiveDatabaseCallable; + } + + @Override + public UnaryCallable deleteHiveDatabaseCallable() { + return deleteHiveDatabaseCallable; + } + + @Override + public UnaryCallable createHiveTableCallable() { + return createHiveTableCallable; + } + + @Override + public UnaryCallable getHiveTableCallable() { + return getHiveTableCallable; + } + + @Override + public UnaryCallable listHiveTablesCallable() { + return listHiveTablesCallable; + } + + @Override + public UnaryCallable + listHiveTablesPagedCallable() { + return listHiveTablesPagedCallable; + } + + @Override + public UnaryCallable updateHiveTableCallable() { + return updateHiveTableCallable; + } + + @Override + public UnaryCallable deleteHiveTableCallable() { + return deleteHiveTableCallable; + } + + @Override + public UnaryCallable + batchCreatePartitionsCallable() { + return batchCreatePartitionsCallable; + } + + @Override + public UnaryCallable batchDeletePartitionsCallable() { + return batchDeletePartitionsCallable; + } + + @Override + public UnaryCallable + batchUpdatePartitionsCallable() { + return batchUpdatePartitionsCallable; + } + + @Override + public ServerStreamingCallable + 
listPartitionsCallable() { + return listPartitionsCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-biglake/google-cloud-biglake/src/main/resources/META-INF/native-image/com.google.cloud.biglake.hive.v1beta/reflect-config.json b/java-biglake/google-cloud-biglake/src/main/resources/META-INF/native-image/com.google.cloud.biglake.hive.v1beta/reflect-config.json new file mode 100644 index 000000000000..316fd6a9c579 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/main/resources/META-INF/native-image/com.google.cloud.biglake.hive.v1beta/reflect-config.json @@ -0,0 +1,2018 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": 
true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": 
true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, 
+ "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": 
true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.FieldSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.FieldSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.HiveCatalog", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.HiveCatalog$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.HiveCatalog$Replica", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.HiveCatalog$Replica$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.HiveCatalog$Replica$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.HiveDatabase", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.cloud.biglake.hive.v1beta.HiveDatabase$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.HiveTable", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.HiveTable$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.Partition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.Partition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.cloud.biglake.hive.v1beta.PartitionValues", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.PartitionValues$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.SerdeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.SerdeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.SerdeInfo$SerdeType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.StorageDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.StorageDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.StorageDescriptor$Order", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.StorageDescriptor$Order$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.StorageDescriptor$SkewedInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.StorageDescriptor$SkewedInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.StorageDescriptor$SkewedInfo$SkewedColumnValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.StorageDescriptor$SkewedInfo$SkewedColumnValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.StorageDescriptor$SkewedInfo$SkewedKeyValuesLocation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { 
+ "name": "com.google.cloud.biglake.hive.v1beta.StorageDescriptor$SkewedInfo$SkewedKeyValuesLocation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + 
}, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnforceNamingStyle", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + 
{ + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { 
+ "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$DefaultSymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceClientHttpJsonTest.java 
b/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceClientHttpJsonTest.java new file mode 100644 index 000000000000..2c59fd85ca83 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceClientHttpJsonTest.java @@ -0,0 +1,1693 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta; + +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveCatalogsPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveDatabasesPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveTablesPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.testing.MockHttpService; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.testing.FakeStatusCode; +import com.google.cloud.biglake.hive.v1beta.stub.HttpJsonHiveMetastoreServiceStub; +import com.google.common.collect.Lists; +import com.google.protobuf.Empty; +import 
com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class HiveMetastoreServiceClientHttpJsonTest { + private static MockHttpService mockService; + private static HiveMetastoreServiceClient client; + + @BeforeClass + public static void startStaticServer() throws IOException { + mockService = + new MockHttpService( + HttpJsonHiveMetastoreServiceStub.getMethodDescriptors(), + HiveMetastoreServiceSettings.getDefaultEndpoint()); + HiveMetastoreServiceSettings settings = + HiveMetastoreServiceSettings.newHttpJsonBuilder() + .setTransportChannelProvider( + HiveMetastoreServiceSettings.defaultHttpJsonTransportProviderBuilder() + .setHttpTransport(mockService) + .build()) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = HiveMetastoreServiceClient.create(settings); + } + + @AfterClass + public static void stopServer() { + client.close(); + } + + @Before + public void setUp() {} + + @After + public void tearDown() throws Exception { + mockService.reset(); + } + + @Test + public void createHiveCatalogTest() throws Exception { + HiveCatalog expectedResponse = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + String hiveCatalogId = "hiveCatalogId-575314556"; + + HiveCatalog actualResponse = client.createHiveCatalog(parent, 
hiveCatalog, hiveCatalogId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createHiveCatalogExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + String hiveCatalogId = "hiveCatalogId-575314556"; + client.createHiveCatalog(parent, hiveCatalog, hiveCatalogId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createHiveCatalogTest2() throws Exception { + HiveCatalog expectedResponse = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-2353"; + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + String hiveCatalogId = "hiveCatalogId-575314556"; + + HiveCatalog actualResponse = client.createHiveCatalog(parent, hiveCatalog, hiveCatalogId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createHiveCatalogExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-2353"; + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + String hiveCatalogId = "hiveCatalogId-575314556"; + client.createHiveCatalog(parent, hiveCatalog, hiveCatalogId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getHiveCatalogTest() throws Exception { + HiveCatalog expectedResponse = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]"); + + HiveCatalog actualResponse = client.getHiveCatalog(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getHiveCatalogExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]"); + client.getHiveCatalog(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getHiveCatalogTest2() throws Exception { + HiveCatalog expectedResponse = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-9652/catalogs/catalog-9652"; + + HiveCatalog actualResponse = client.getHiveCatalog(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getHiveCatalogExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "projects/project-9652/catalogs/catalog-9652"; + client.getHiveCatalog(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveCatalogsTest() throws Exception { + HiveCatalog responsesElement = HiveCatalog.newBuilder().build(); + ListHiveCatalogsResponse expectedResponse = + ListHiveCatalogsResponse.newBuilder() + .setNextPageToken("") + .addAllCatalogs(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + + ListHiveCatalogsPagedResponse pagedListResponse = client.listHiveCatalogs(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getCatalogsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listHiveCatalogsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + client.listHiveCatalogs(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveCatalogsTest2() throws Exception { + HiveCatalog responsesElement = HiveCatalog.newBuilder().build(); + ListHiveCatalogsResponse expectedResponse = + ListHiveCatalogsResponse.newBuilder() + .setNextPageToken("") + .addAllCatalogs(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-2353"; + + ListHiveCatalogsPagedResponse pagedListResponse = client.listHiveCatalogs(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getCatalogsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listHiveCatalogsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-2353"; + client.listHiveCatalogs(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateHiveCatalogTest() throws Exception { + HiveCatalog expectedResponse = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + HiveCatalog hiveCatalog = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + HiveCatalog actualResponse = client.updateHiveCatalog(hiveCatalog, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateHiveCatalogExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + HiveCatalog hiveCatalog = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateHiveCatalog(hiveCatalog, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteHiveCatalogTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]"); + + client.deleteHiveCatalog(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteHiveCatalogExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]"); + client.deleteHiveCatalog(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteHiveCatalogTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-9652/catalogs/catalog-9652"; + + client.deleteHiveCatalog(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteHiveCatalogExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "projects/project-9652/catalogs/catalog-9652"; + client.deleteHiveCatalog(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createHiveDatabaseTest() throws Exception { + HiveDatabase expectedResponse = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + mockService.addResponse(expectedResponse); + + CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]"); + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + String hiveDatabaseId = "hiveDatabaseId-1150232698"; + + HiveDatabase actualResponse = client.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createHiveDatabaseExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]"); + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + String hiveDatabaseId = "hiveDatabaseId-1150232698"; + client.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createHiveDatabaseTest2() throws Exception { + HiveDatabase expectedResponse = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-2565/catalogs/catalog-2565"; + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + String hiveDatabaseId = "hiveDatabaseId-1150232698"; + + HiveDatabase actualResponse = client.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createHiveDatabaseExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-2565/catalogs/catalog-2565"; + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + String hiveDatabaseId = "hiveDatabaseId-1150232698"; + client.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getHiveDatabaseTest() throws Exception { + HiveDatabase expectedResponse = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + mockService.addResponse(expectedResponse); + + NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + + HiveDatabase actualResponse = client.getHiveDatabase(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getHiveDatabaseExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + client.getHiveDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getHiveDatabaseTest2() throws Exception { + HiveDatabase expectedResponse = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-2808/catalogs/catalog-2808/databases/database-2808"; + + HiveDatabase actualResponse = client.getHiveDatabase(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getHiveDatabaseExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "projects/project-2808/catalogs/catalog-2808/databases/database-2808"; + client.getHiveDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveDatabasesTest() throws Exception { + HiveDatabase responsesElement = HiveDatabase.newBuilder().build(); + ListHiveDatabasesResponse expectedResponse = + ListHiveDatabasesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabases(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]"); + + ListHiveDatabasesPagedResponse pagedListResponse = client.listHiveDatabases(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabasesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listHiveDatabasesExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]"); + client.listHiveDatabases(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveDatabasesTest2() throws Exception { + HiveDatabase responsesElement = HiveDatabase.newBuilder().build(); + ListHiveDatabasesResponse expectedResponse = + ListHiveDatabasesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabases(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-2565/catalogs/catalog-2565"; + + ListHiveDatabasesPagedResponse pagedListResponse = client.listHiveDatabases(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabasesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listHiveDatabasesExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-2565/catalogs/catalog-2565"; + client.listHiveDatabases(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateHiveDatabaseTest() throws Exception { + HiveDatabase expectedResponse = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + mockService.addResponse(expectedResponse); + + HiveDatabase hiveDatabase = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + HiveDatabase actualResponse = client.updateHiveDatabase(hiveDatabase, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateHiveDatabaseExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + HiveDatabase hiveDatabase = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateHiveDatabase(hiveDatabase, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + 
// Expected exception. + } + } + + @Test + public void deleteHiveDatabaseTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + + client.deleteHiveDatabase(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteHiveDatabaseExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + client.deleteHiveDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteHiveDatabaseTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-2808/catalogs/catalog-2808/databases/database-2808"; + + client.deleteHiveDatabase(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteHiveDatabaseExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "projects/project-2808/catalogs/catalog-2808/databases/database-2808"; + client.deleteHiveDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createHiveTableTest() throws Exception { + HiveTable expectedResponse = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + mockService.addResponse(expectedResponse); + + NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + HiveTable hiveTable = HiveTable.newBuilder().build(); + String hiveTableId = "hiveTableId152241145"; + + HiveTable actualResponse = client.createHiveTable(parent, hiveTable, hiveTableId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createHiveTableExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + HiveTable hiveTable = HiveTable.newBuilder().build(); + String hiveTableId = "hiveTableId152241145"; + client.createHiveTable(parent, hiveTable, hiveTableId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createHiveTableTest2() throws Exception { + HiveTable expectedResponse = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-9879/catalogs/catalog-9879/databases/database-9879"; + HiveTable hiveTable = HiveTable.newBuilder().build(); + String hiveTableId = "hiveTableId152241145"; + + HiveTable actualResponse = client.createHiveTable(parent, hiveTable, hiveTableId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createHiveTableExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-9879/catalogs/catalog-9879/databases/database-9879"; + HiveTable hiveTable = HiveTable.newBuilder().build(); + String hiveTableId = "hiveTableId152241145"; + client.createHiveTable(parent, hiveTable, hiveTableId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getHiveTableTest() throws Exception { + HiveTable expectedResponse = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + mockService.addResponse(expectedResponse); + + TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + + HiveTable actualResponse = client.getHiveTable(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getHiveTableExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + client.getHiveTable(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getHiveTableTest2() throws Exception { + HiveTable expectedResponse = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-9385/catalogs/catalog-9385/databases/database-9385/tables/table-9385"; + + HiveTable actualResponse = client.getHiveTable(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getHiveTableExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-9385/catalogs/catalog-9385/databases/database-9385/tables/table-9385"; + client.getHiveTable(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveTablesTest() throws Exception { + HiveTable responsesElement = HiveTable.newBuilder().build(); + ListHiveTablesResponse expectedResponse = + ListHiveTablesResponse.newBuilder() + .setNextPageToken("") + .addAllTables(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + + ListHiveTablesPagedResponse pagedListResponse = client.listHiveTables(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getTablesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listHiveTablesExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + client.listHiveTables(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveTablesTest2() throws Exception { + HiveTable responsesElement = HiveTable.newBuilder().build(); + ListHiveTablesResponse expectedResponse = + ListHiveTablesResponse.newBuilder() + .setNextPageToken("") + .addAllTables(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-9879/catalogs/catalog-9879/databases/database-9879"; + + ListHiveTablesPagedResponse pagedListResponse = client.listHiveTables(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getTablesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listHiveTablesExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-9879/catalogs/catalog-9879/databases/database-9879"; + client.listHiveTables(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateHiveTableTest() throws Exception { + HiveTable expectedResponse = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + mockService.addResponse(expectedResponse); + + HiveTable hiveTable = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + HiveTable actualResponse = client.updateHiveTable(hiveTable, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateHiveTableExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + HiveTable hiveTable = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + 
.setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateHiveTable(hiveTable, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteHiveTableTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + + client.deleteHiveTable(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteHiveTableExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + client.deleteHiveTable(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteHiveTableTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-9385/catalogs/catalog-9385/databases/database-9385/tables/table-9385"; + + client.deleteHiveTable(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteHiveTableExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-9385/catalogs/catalog-9385/databases/database-9385/tables/table-9385"; + client.deleteHiveTable(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchCreatePartitionsTest() throws Exception { + BatchCreatePartitionsResponse expectedResponse = + BatchCreatePartitionsResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + + BatchCreatePartitionsResponse actualResponse = client.batchCreatePartitions(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void batchCreatePartitionsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + client.batchCreatePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchCreatePartitionsTest2() throws Exception { + BatchCreatePartitionsResponse expectedResponse = + BatchCreatePartitionsResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + String parent = + "projects/project-3670/catalogs/catalog-3670/databases/database-3670/tables/table-3670"; + + BatchCreatePartitionsResponse actualResponse = client.batchCreatePartitions(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void batchCreatePartitionsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = + "projects/project-3670/catalogs/catalog-3670/databases/database-3670/tables/table-3670"; + client.batchCreatePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchDeletePartitionsTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + + client.batchDeletePartitions(parent); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void batchDeletePartitionsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + client.batchDeletePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchDeletePartitionsTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String parent = + "projects/project-3670/catalogs/catalog-3670/databases/database-3670/tables/table-3670"; + + client.batchDeletePartitions(parent); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void batchDeletePartitionsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = + "projects/project-3670/catalogs/catalog-3670/databases/database-3670/tables/table-3670"; + client.batchDeletePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchUpdatePartitionsTest() throws Exception { + BatchUpdatePartitionsResponse expectedResponse = + BatchUpdatePartitionsResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + + BatchUpdatePartitionsResponse actualResponse = client.batchUpdatePartitions(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void batchUpdatePartitionsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + client.batchUpdatePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchUpdatePartitionsTest2() throws Exception { + BatchUpdatePartitionsResponse expectedResponse = + BatchUpdatePartitionsResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + String parent = + "projects/project-3670/catalogs/catalog-3670/databases/database-3670/tables/table-3670"; + + BatchUpdatePartitionsResponse actualResponse = client.batchUpdatePartitions(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void batchUpdatePartitionsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = + "projects/project-3670/catalogs/catalog-3670/databases/database-3670/tables/table-3670"; + client.batchUpdatePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listPartitionsTest() throws Exception {} + + @Test + public void listPartitionsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + } +} diff --git a/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceClientTest.java b/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceClientTest.java new file mode 100644 index 000000000000..0f13940c9179 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceClientTest.java @@ -0,0 +1,1508 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta; + +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveCatalogsPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveDatabasesPagedResponse; +import static com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient.ListHiveTablesPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StatusCode; +import com.google.common.collect.Lists; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class HiveMetastoreServiceClientTest { + private static MockHiveMetastoreService mockHiveMetastoreService; + private static MockServiceHelper mockServiceHelper; + private LocalChannelProvider channelProvider; + private HiveMetastoreServiceClient client; + + @BeforeClass + public static void startStaticServer() { + mockHiveMetastoreService = new 
MockHiveMetastoreService(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockHiveMetastoreService)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + HiveMetastoreServiceSettings settings = + HiveMetastoreServiceSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = HiveMetastoreServiceClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void createHiveCatalogTest() throws Exception { + HiveCatalog expectedResponse = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + String hiveCatalogId = "hiveCatalogId-575314556"; + + HiveCatalog actualResponse = client.createHiveCatalog(parent, hiveCatalog, hiveCatalogId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateHiveCatalogRequest actualRequest = ((CreateHiveCatalogRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(hiveCatalog, actualRequest.getHiveCatalog()); + Assert.assertEquals(hiveCatalogId, actualRequest.getHiveCatalogId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + 
GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createHiveCatalogExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + String hiveCatalogId = "hiveCatalogId-575314556"; + client.createHiveCatalog(parent, hiveCatalog, hiveCatalogId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createHiveCatalogTest2() throws Exception { + HiveCatalog expectedResponse = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String parent = "parent-995424086"; + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + String hiveCatalogId = "hiveCatalogId-575314556"; + + HiveCatalog actualResponse = client.createHiveCatalog(parent, hiveCatalog, hiveCatalogId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateHiveCatalogRequest actualRequest = ((CreateHiveCatalogRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(hiveCatalog, actualRequest.getHiveCatalog()); + Assert.assertEquals(hiveCatalogId, actualRequest.getHiveCatalogId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createHiveCatalogExceptionTest2() throws Exception { + 
StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String parent = "parent-995424086"; + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + String hiveCatalogId = "hiveCatalogId-575314556"; + client.createHiveCatalog(parent, hiveCatalog, hiveCatalogId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getHiveCatalogTest() throws Exception { + HiveCatalog expectedResponse = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]"); + + HiveCatalog actualResponse = client.getHiveCatalog(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetHiveCatalogRequest actualRequest = ((GetHiveCatalogRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getHiveCatalogExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]"); + client.getHiveCatalog(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getHiveCatalogTest2() throws Exception { + HiveCatalog expectedResponse = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String name = "name3373707"; + + HiveCatalog actualResponse = client.getHiveCatalog(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetHiveCatalogRequest actualRequest = ((GetHiveCatalogRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getHiveCatalogExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String name = "name3373707"; + client.getHiveCatalog(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveCatalogsTest() throws Exception { + HiveCatalog responsesElement = HiveCatalog.newBuilder().build(); + ListHiveCatalogsResponse expectedResponse = + ListHiveCatalogsResponse.newBuilder() + .setNextPageToken("") + .addAllCatalogs(Arrays.asList(responsesElement)) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + + ListHiveCatalogsPagedResponse pagedListResponse = client.listHiveCatalogs(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getCatalogsList().get(0), resources.get(0)); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListHiveCatalogsRequest actualRequest = ((ListHiveCatalogsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listHiveCatalogsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + client.listHiveCatalogs(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveCatalogsTest2() throws Exception { + HiveCatalog responsesElement = HiveCatalog.newBuilder().build(); + ListHiveCatalogsResponse expectedResponse = + ListHiveCatalogsResponse.newBuilder() + .setNextPageToken("") + .addAllCatalogs(Arrays.asList(responsesElement)) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListHiveCatalogsPagedResponse pagedListResponse = client.listHiveCatalogs(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getCatalogsList().get(0), resources.get(0)); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListHiveCatalogsRequest actualRequest = ((ListHiveCatalogsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listHiveCatalogsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String parent = "parent-995424086"; + client.listHiveCatalogs(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateHiveCatalogTest() throws Exception { + HiveCatalog expectedResponse = + HiveCatalog.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .addAllReplicas(new ArrayList()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + HiveCatalog actualResponse = client.updateHiveCatalog(hiveCatalog, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateHiveCatalogRequest actualRequest = ((UpdateHiveCatalogRequest) actualRequests.get(0)); + + Assert.assertEquals(hiveCatalog, actualRequest.getHiveCatalog()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateHiveCatalogExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateHiveCatalog(hiveCatalog, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteHiveCatalogTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]"); + + client.deleteHiveCatalog(name); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteHiveCatalogRequest actualRequest = ((DeleteHiveCatalogRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteHiveCatalogExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]"); + client.deleteHiveCatalog(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteHiveCatalogTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteHiveCatalog(name); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteHiveCatalogRequest actualRequest = ((DeleteHiveCatalogRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteHiveCatalogExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String name = "name3373707"; + client.deleteHiveCatalog(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createHiveDatabaseTest() throws Exception { + HiveDatabase expectedResponse = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]"); + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + String hiveDatabaseId = "hiveDatabaseId-1150232698"; + + HiveDatabase actualResponse = client.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateHiveDatabaseRequest actualRequest = ((CreateHiveDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(hiveDatabase, actualRequest.getHiveDatabase()); + Assert.assertEquals(hiveDatabaseId, actualRequest.getHiveDatabaseId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createHiveDatabaseExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]"); + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + String hiveDatabaseId = "hiveDatabaseId-1150232698"; + client.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createHiveDatabaseTest2() throws Exception { + HiveDatabase expectedResponse = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String parent = "parent-995424086"; + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + String hiveDatabaseId = "hiveDatabaseId-1150232698"; + + HiveDatabase actualResponse = client.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateHiveDatabaseRequest actualRequest = ((CreateHiveDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(hiveDatabase, actualRequest.getHiveDatabase()); + Assert.assertEquals(hiveDatabaseId, actualRequest.getHiveDatabaseId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createHiveDatabaseExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String parent = "parent-995424086"; + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + String hiveDatabaseId = "hiveDatabaseId-1150232698"; + client.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getHiveDatabaseTest() throws Exception { + HiveDatabase expectedResponse = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + + HiveDatabase actualResponse = client.getHiveDatabase(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetHiveDatabaseRequest actualRequest = ((GetHiveDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getHiveDatabaseExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + client.getHiveDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getHiveDatabaseTest2() throws Exception { + HiveDatabase expectedResponse = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String name = "name3373707"; + + HiveDatabase actualResponse = client.getHiveDatabase(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetHiveDatabaseRequest actualRequest = ((GetHiveDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getHiveDatabaseExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String name = "name3373707"; + client.getHiveDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveDatabasesTest() throws Exception { + HiveDatabase responsesElement = HiveDatabase.newBuilder().build(); + ListHiveDatabasesResponse expectedResponse = + ListHiveDatabasesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabases(Arrays.asList(responsesElement)) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]"); + + ListHiveDatabasesPagedResponse pagedListResponse = client.listHiveDatabases(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabasesList().get(0), resources.get(0)); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListHiveDatabasesRequest actualRequest = ((ListHiveDatabasesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listHiveDatabasesExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]"); + client.listHiveDatabases(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveDatabasesTest2() throws Exception { + HiveDatabase responsesElement = HiveDatabase.newBuilder().build(); + ListHiveDatabasesResponse expectedResponse = + ListHiveDatabasesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabases(Arrays.asList(responsesElement)) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListHiveDatabasesPagedResponse pagedListResponse = client.listHiveDatabases(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabasesList().get(0), resources.get(0)); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListHiveDatabasesRequest actualRequest = ((ListHiveDatabasesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listHiveDatabasesExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String parent = "parent-995424086"; + client.listHiveDatabases(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateHiveDatabaseTest() throws Exception { + HiveDatabase expectedResponse = + HiveDatabase.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setDescription("description-1724546052") + .setLocationUri("locationUri552310135") + .putAllParameters(new HashMap()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + HiveDatabase actualResponse = client.updateHiveDatabase(hiveDatabase, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateHiveDatabaseRequest actualRequest = ((UpdateHiveDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(hiveDatabase, actualRequest.getHiveDatabase()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateHiveDatabaseExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateHiveDatabase(hiveDatabase, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteHiveDatabaseTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + + client.deleteHiveDatabase(name); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteHiveDatabaseRequest actualRequest = ((DeleteHiveDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteHiveDatabaseExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + client.deleteHiveDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteHiveDatabaseTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteHiveDatabase(name); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteHiveDatabaseRequest actualRequest = ((DeleteHiveDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteHiveDatabaseExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String name = "name3373707"; + client.deleteHiveDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createHiveTableTest() throws Exception { + HiveTable expectedResponse = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + HiveTable hiveTable = HiveTable.newBuilder().build(); + String hiveTableId = "hiveTableId152241145"; + + HiveTable actualResponse = client.createHiveTable(parent, hiveTable, hiveTableId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateHiveTableRequest actualRequest = ((CreateHiveTableRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(hiveTable, actualRequest.getHiveTable()); + Assert.assertEquals(hiveTableId, actualRequest.getHiveTableId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createHiveTableExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + HiveTable hiveTable = HiveTable.newBuilder().build(); + String hiveTableId = "hiveTableId152241145"; + client.createHiveTable(parent, hiveTable, hiveTableId); + Assert.fail("No exception raised"); + } catch 
(InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createHiveTableTest2() throws Exception { + HiveTable expectedResponse = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String parent = "parent-995424086"; + HiveTable hiveTable = HiveTable.newBuilder().build(); + String hiveTableId = "hiveTableId152241145"; + + HiveTable actualResponse = client.createHiveTable(parent, hiveTable, hiveTableId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateHiveTableRequest actualRequest = ((CreateHiveTableRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(hiveTable, actualRequest.getHiveTable()); + Assert.assertEquals(hiveTableId, actualRequest.getHiveTableId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createHiveTableExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String parent = "parent-995424086"; + HiveTable hiveTable = HiveTable.newBuilder().build(); + String hiveTableId = "hiveTableId152241145"; + client.createHiveTable(parent, hiveTable, hiveTableId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected 
exception. + } + } + + @Test + public void getHiveTableTest() throws Exception { + HiveTable expectedResponse = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + + HiveTable actualResponse = client.getHiveTable(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetHiveTableRequest actualRequest = ((GetHiveTableRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getHiveTableExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + client.getHiveTable(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getHiveTableTest2() throws Exception { + HiveTable expectedResponse = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String name = "name3373707"; + + HiveTable actualResponse = client.getHiveTable(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetHiveTableRequest actualRequest = ((GetHiveTableRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getHiveTableExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String name = "name3373707"; + client.getHiveTable(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveTablesTest() throws Exception { + HiveTable responsesElement = HiveTable.newBuilder().build(); + ListHiveTablesResponse expectedResponse = + ListHiveTablesResponse.newBuilder() + .setNextPageToken("") + .addAllTables(Arrays.asList(responsesElement)) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + + ListHiveTablesPagedResponse pagedListResponse = client.listHiveTables(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getTablesList().get(0), resources.get(0)); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListHiveTablesRequest actualRequest = ((ListHiveTablesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listHiveTablesExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + client.listHiveTables(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listHiveTablesTest2() throws Exception { + HiveTable responsesElement = HiveTable.newBuilder().build(); + ListHiveTablesResponse expectedResponse = + ListHiveTablesResponse.newBuilder() + .setNextPageToken("") + .addAllTables(Arrays.asList(responsesElement)) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListHiveTablesPagedResponse pagedListResponse = client.listHiveTables(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getTablesList().get(0), resources.get(0)); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListHiveTablesRequest actualRequest = ((ListHiveTablesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listHiveTablesExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String parent = "parent-995424086"; + client.listHiveTables(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateHiveTableTest() throws Exception { + HiveTable expectedResponse = + HiveTable.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setDescription("description-1724546052") + .setStorageDescriptor(StorageDescriptor.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllPartitionKeys(new ArrayList()) + .putAllParameters(new HashMap()) + .setTableType("tableType-1988515800") + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + HiveTable hiveTable = HiveTable.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + HiveTable actualResponse = client.updateHiveTable(hiveTable, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateHiveTableRequest actualRequest = ((UpdateHiveTableRequest) actualRequests.get(0)); + + Assert.assertEquals(hiveTable, actualRequest.getHiveTable()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateHiveTableExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + HiveTable hiveTable = HiveTable.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateHiveTable(hiveTable, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteHiveTableTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + + client.deleteHiveTable(name); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteHiveTableRequest actualRequest = ((DeleteHiveTableRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteHiveTableExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + client.deleteHiveTable(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteHiveTableTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteHiveTable(name); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteHiveTableRequest actualRequest = ((DeleteHiveTableRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteHiveTableExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String name = "name3373707"; + client.deleteHiveTable(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchCreatePartitionsTest() throws Exception { + BatchCreatePartitionsResponse expectedResponse = + BatchCreatePartitionsResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + + BatchCreatePartitionsResponse actualResponse = client.batchCreatePartitions(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchCreatePartitionsRequest actualRequest = + ((BatchCreatePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchCreatePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + client.batchCreatePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchCreatePartitionsTest2() throws Exception { + BatchCreatePartitionsResponse expectedResponse = + BatchCreatePartitionsResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + BatchCreatePartitionsResponse actualResponse = client.batchCreatePartitions(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchCreatePartitionsRequest actualRequest = + ((BatchCreatePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchCreatePartitionsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String parent = "parent-995424086"; + client.batchCreatePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchDeletePartitionsTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + + client.batchDeletePartitions(parent); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchDeletePartitionsRequest actualRequest = + ((BatchDeletePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchDeletePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + client.batchDeletePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchDeletePartitionsTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + client.batchDeletePartitions(parent); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchDeletePartitionsRequest actualRequest = + ((BatchDeletePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchDeletePartitionsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String parent = "parent-995424086"; + client.batchDeletePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchUpdatePartitionsTest() throws Exception { + BatchUpdatePartitionsResponse expectedResponse = + BatchUpdatePartitionsResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + + BatchUpdatePartitionsResponse actualResponse = client.batchUpdatePartitions(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchUpdatePartitionsRequest actualRequest = + ((BatchUpdatePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchUpdatePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + client.batchUpdatePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchUpdatePartitionsTest2() throws Exception { + BatchUpdatePartitionsResponse expectedResponse = + BatchUpdatePartitionsResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .build(); + mockHiveMetastoreService.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + BatchUpdatePartitionsResponse actualResponse = client.batchUpdatePartitions(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockHiveMetastoreService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchUpdatePartitionsRequest actualRequest = + ((BatchUpdatePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchUpdatePartitionsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + + try { + String parent = "parent-995424086"; + client.batchUpdatePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listPartitionsTest() throws Exception { + ListPartitionsResponse expectedResponse = + ListPartitionsResponse.newBuilder().addAllPartitions(new ArrayList()).build(); + mockHiveMetastoreService.addResponse(expectedResponse); + ListPartitionsRequest request = + ListPartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setFilter("filter-1274492040") + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = + client.listPartitionsCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void listPartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockHiveMetastoreService.addException(exception); + ListPartitionsRequest request = + ListPartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setFilter("filter-1274492040") + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = + client.listPartitionsCallable(); + callable.serverStreamingCall(request, responseObserver); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } +} diff --git 
a/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/MockHiveMetastoreService.java b/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/MockHiveMetastoreService.java new file mode 100644 index 000000000000..dc3a9baaae96 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/MockHiveMetastoreService.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockHiveMetastoreService implements MockGrpcService { + private final MockHiveMetastoreServiceImpl serviceImpl; + + public MockHiveMetastoreService() { + serviceImpl = new MockHiveMetastoreServiceImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/MockHiveMetastoreServiceImpl.java b/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/MockHiveMetastoreServiceImpl.java new file mode 100644 index 000000000000..338ff7fd1ae5 --- /dev/null +++ b/java-biglake/google-cloud-biglake/src/test/java/com/google/cloud/biglake/hive/v1beta/MockHiveMetastoreServiceImpl.java @@ -0,0 +1,466 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta; + +import com.google.api.core.BetaApi; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceGrpc.HiveMetastoreServiceImplBase; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockHiveMetastoreServiceImpl extends HiveMetastoreServiceImplBase { + private List requests; + private Queue responses; + + public MockHiveMetastoreServiceImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createHiveCatalog( + CreateHiveCatalogRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof HiveCatalog) { + requests.add(request); + responseObserver.onNext(((HiveCatalog) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) 
response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateHiveCatalog, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + HiveCatalog.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getHiveCatalog( + GetHiveCatalogRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof HiveCatalog) { + requests.add(request); + responseObserver.onNext(((HiveCatalog) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetHiveCatalog, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + HiveCatalog.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listHiveCatalogs( + ListHiveCatalogsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListHiveCatalogsResponse) { + requests.add(request); + responseObserver.onNext(((ListHiveCatalogsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListHiveCatalogs, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ListHiveCatalogsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateHiveCatalog( + UpdateHiveCatalogRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof HiveCatalog) { + requests.add(request); + responseObserver.onNext(((HiveCatalog) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateHiveCatalog, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + HiveCatalog.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteHiveCatalog( + DeleteHiveCatalogRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteHiveCatalog, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void createHiveDatabase( + CreateHiveDatabaseRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof HiveDatabase) { + requests.add(request); + responseObserver.onNext(((HiveDatabase) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateHiveDatabase, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + HiveDatabase.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getHiveDatabase( + GetHiveDatabaseRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof HiveDatabase) { + requests.add(request); + responseObserver.onNext(((HiveDatabase) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetHiveDatabase, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + HiveDatabase.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listHiveDatabases( + ListHiveDatabasesRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListHiveDatabasesResponse) { + requests.add(request); + responseObserver.onNext(((ListHiveDatabasesResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListHiveDatabases, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ListHiveDatabasesResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateHiveDatabase( + UpdateHiveDatabaseRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof HiveDatabase) { + requests.add(request); + responseObserver.onNext(((HiveDatabase) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateHiveDatabase, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + HiveDatabase.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteHiveDatabase( + DeleteHiveDatabaseRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteHiveDatabase, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void createHiveTable( + CreateHiveTableRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof HiveTable) { + requests.add(request); + responseObserver.onNext(((HiveTable) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateHiveTable, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + HiveTable.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getHiveTable( + GetHiveTableRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof HiveTable) { + requests.add(request); + responseObserver.onNext(((HiveTable) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetHiveTable, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + HiveTable.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listHiveTables( + ListHiveTablesRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListHiveTablesResponse) { + requests.add(request); + responseObserver.onNext(((ListHiveTablesResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListHiveTables, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ListHiveTablesResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateHiveTable( + UpdateHiveTableRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof HiveTable) { + requests.add(request); + responseObserver.onNext(((HiveTable) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateHiveTable, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + HiveTable.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteHiveTable( + DeleteHiveTableRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteHiveTable, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void batchCreatePartitions( + BatchCreatePartitionsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BatchCreatePartitionsResponse) { + requests.add(request); + responseObserver.onNext(((BatchCreatePartitionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchCreatePartitions, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + BatchCreatePartitionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void batchDeletePartitions( + BatchDeletePartitionsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchDeletePartitions, expected %s or" + + " %s", + response == null ? 
"null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void batchUpdatePartitions( + BatchUpdatePartitionsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BatchUpdatePartitionsResponse) { + requests.add(request); + responseObserver.onNext(((BatchUpdatePartitionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchUpdatePartitions, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + BatchUpdatePartitionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listPartitions( + ListPartitionsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListPartitionsResponse) { + requests.add(request); + responseObserver.onNext(((ListPartitionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListPartitions, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ListPartitionsResponse.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/java-biglake/grpc-google-cloud-biglake-v1beta/pom.xml b/java-biglake/grpc-google-cloud-biglake-v1beta/pom.xml new file mode 100644 index 000000000000..7ceeacfbb66a --- /dev/null +++ b/java-biglake/grpc-google-cloud-biglake-v1beta/pom.xml @@ -0,0 +1,45 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-biglake-v1beta + 0.76.0-SNAPSHOT + grpc-google-cloud-biglake-v1beta + GRPC library for google-cloud-biglake + + com.google.cloud + google-cloud-biglake-parent + 0.76.0-SNAPSHOT + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api.grpc + proto-google-cloud-biglake-v1beta + + + com.google.guava + guava + + + diff --git a/java-biglake/grpc-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceGrpc.java b/java-biglake/grpc-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceGrpc.java new file mode 100644 index 000000000000..49ef2aec6e1f --- /dev/null +++ b/java-biglake/grpc-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreServiceGrpc.java @@ -0,0 +1,2941 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.biglake.hive.v1beta; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
+ * Hive Metastore Service is a BigLake service that allows users to manage
+ * their external Hive catalogs. Full API compatibility with OSS Hive Metastore
+ * APIs is not supported. The methods mostly match the Hive Metastore API
+ * spec, with a few exceptions.
+ * These include: listing resources with a pattern and environment context
+ * (combined into a single List API), returning a ListResponse object instead
+ * of a list of resources, transactions, locks, etc.
+ * The BigLake Hive Metastore API defines the following resources:
+ * * A collection of Google Cloud projects: `/projects/*`
+ * * Each project has a collection of catalogs: `/catalogs/*`
+ * * Each catalog has a collection of databases: `/databases/*`
+ * * Each database has a collection of tables: `/tables/*`
+ * 
+ */ +@io.grpc.stub.annotations.GrpcGenerated +public final class HiveMetastoreServiceGrpc { + + private HiveMetastoreServiceGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.cloud.biglake.hive.v1beta.HiveMetastoreService"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest, + com.google.cloud.biglake.hive.v1beta.HiveCatalog> + getCreateHiveCatalogMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateHiveCatalog", + requestType = com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.HiveCatalog.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest, + com.google.cloud.biglake.hive.v1beta.HiveCatalog> + getCreateHiveCatalogMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest, + com.google.cloud.biglake.hive.v1beta.HiveCatalog> + getCreateHiveCatalogMethod; + if ((getCreateHiveCatalogMethod = HiveMetastoreServiceGrpc.getCreateHiveCatalogMethod) + == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getCreateHiveCatalogMethod = HiveMetastoreServiceGrpc.getCreateHiveCatalogMethod) + == null) { + HiveMetastoreServiceGrpc.getCreateHiveCatalogMethod = + getCreateHiveCatalogMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateHiveCatalog")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.HiveCatalog + .getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("CreateHiveCatalog")) + .build(); + } + } + } + return getCreateHiveCatalogMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest, + com.google.cloud.biglake.hive.v1beta.HiveCatalog> + getGetHiveCatalogMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetHiveCatalog", + requestType = com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.HiveCatalog.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest, + com.google.cloud.biglake.hive.v1beta.HiveCatalog> + getGetHiveCatalogMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest, + com.google.cloud.biglake.hive.v1beta.HiveCatalog> + getGetHiveCatalogMethod; + if ((getGetHiveCatalogMethod = HiveMetastoreServiceGrpc.getGetHiveCatalogMethod) == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getGetHiveCatalogMethod = HiveMetastoreServiceGrpc.getGetHiveCatalogMethod) == null) { + HiveMetastoreServiceGrpc.getGetHiveCatalogMethod = + getGetHiveCatalogMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetHiveCatalog")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.HiveCatalog + .getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("GetHiveCatalog")) + .build(); + } + } + } + return getGetHiveCatalogMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse> + getListHiveCatalogsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListHiveCatalogs", + requestType = com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse> + getListHiveCatalogsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse> + getListHiveCatalogsMethod; + if ((getListHiveCatalogsMethod = HiveMetastoreServiceGrpc.getListHiveCatalogsMethod) == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getListHiveCatalogsMethod = HiveMetastoreServiceGrpc.getListHiveCatalogsMethod) + == null) { + HiveMetastoreServiceGrpc.getListHiveCatalogsMethod = + getListHiveCatalogsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListHiveCatalogs")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("ListHiveCatalogs")) + .build(); + } + } + } + return getListHiveCatalogsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest, + com.google.cloud.biglake.hive.v1beta.HiveCatalog> + getUpdateHiveCatalogMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateHiveCatalog", + requestType = com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.HiveCatalog.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest, + com.google.cloud.biglake.hive.v1beta.HiveCatalog> + getUpdateHiveCatalogMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest, + com.google.cloud.biglake.hive.v1beta.HiveCatalog> + getUpdateHiveCatalogMethod; + if ((getUpdateHiveCatalogMethod = HiveMetastoreServiceGrpc.getUpdateHiveCatalogMethod) + == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getUpdateHiveCatalogMethod = HiveMetastoreServiceGrpc.getUpdateHiveCatalogMethod) + == null) { + HiveMetastoreServiceGrpc.getUpdateHiveCatalogMethod = + getUpdateHiveCatalogMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UpdateHiveCatalog")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.HiveCatalog + .getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("UpdateHiveCatalog")) + .build(); + } + } + } + return getUpdateHiveCatalogMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest, com.google.protobuf.Empty> + getDeleteHiveCatalogMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteHiveCatalog", + requestType = com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest, com.google.protobuf.Empty> + getDeleteHiveCatalogMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest, + com.google.protobuf.Empty> + getDeleteHiveCatalogMethod; + if ((getDeleteHiveCatalogMethod = HiveMetastoreServiceGrpc.getDeleteHiveCatalogMethod) + == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getDeleteHiveCatalogMethod = HiveMetastoreServiceGrpc.getDeleteHiveCatalogMethod) + == null) { + HiveMetastoreServiceGrpc.getDeleteHiveCatalogMethod = + getDeleteHiveCatalogMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeleteHiveCatalog")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("DeleteHiveCatalog")) + .build(); + } + } + } + return getDeleteHiveCatalogMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase> + getCreateHiveDatabaseMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateHiveDatabase", + requestType = com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.HiveDatabase.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase> + getCreateHiveDatabaseMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase> + getCreateHiveDatabaseMethod; + if ((getCreateHiveDatabaseMethod = HiveMetastoreServiceGrpc.getCreateHiveDatabaseMethod) + == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getCreateHiveDatabaseMethod = HiveMetastoreServiceGrpc.getCreateHiveDatabaseMethod) + == null) { + HiveMetastoreServiceGrpc.getCreateHiveDatabaseMethod = + getCreateHiveDatabaseMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateHiveDatabase")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.HiveDatabase + .getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("CreateHiveDatabase")) + .build(); + } + } + } + return getCreateHiveDatabaseMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase> + getGetHiveDatabaseMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetHiveDatabase", + requestType = com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.HiveDatabase.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase> + getGetHiveDatabaseMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase> + getGetHiveDatabaseMethod; + if ((getGetHiveDatabaseMethod = HiveMetastoreServiceGrpc.getGetHiveDatabaseMethod) == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getGetHiveDatabaseMethod = HiveMetastoreServiceGrpc.getGetHiveDatabaseMethod) + == null) { + HiveMetastoreServiceGrpc.getGetHiveDatabaseMethod = + getGetHiveDatabaseMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetHiveDatabase")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.HiveDatabase + .getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("GetHiveDatabase")) + .build(); + } + } + } + return getGetHiveDatabaseMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse> + getListHiveDatabasesMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListHiveDatabases", + requestType = com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse> + getListHiveDatabasesMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse> + getListHiveDatabasesMethod; + if ((getListHiveDatabasesMethod = HiveMetastoreServiceGrpc.getListHiveDatabasesMethod) + == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getListHiveDatabasesMethod = HiveMetastoreServiceGrpc.getListHiveDatabasesMethod) + == null) { + HiveMetastoreServiceGrpc.getListHiveDatabasesMethod = + getListHiveDatabasesMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListHiveDatabases")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("ListHiveDatabases")) + .build(); + } + } + } + return getListHiveDatabasesMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase> + getUpdateHiveDatabaseMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateHiveDatabase", + requestType = com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.HiveDatabase.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase> + getUpdateHiveDatabaseMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase> + getUpdateHiveDatabaseMethod; + if ((getUpdateHiveDatabaseMethod = HiveMetastoreServiceGrpc.getUpdateHiveDatabaseMethod) + == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getUpdateHiveDatabaseMethod = HiveMetastoreServiceGrpc.getUpdateHiveDatabaseMethod) + == null) { + HiveMetastoreServiceGrpc.getUpdateHiveDatabaseMethod = + getUpdateHiveDatabaseMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UpdateHiveDatabase")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.HiveDatabase + .getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("UpdateHiveDatabase")) + .build(); + } + } + } + return getUpdateHiveDatabaseMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest, com.google.protobuf.Empty> + getDeleteHiveDatabaseMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteHiveDatabase", + requestType = com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest, com.google.protobuf.Empty> + getDeleteHiveDatabaseMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest, + com.google.protobuf.Empty> + getDeleteHiveDatabaseMethod; + if ((getDeleteHiveDatabaseMethod = HiveMetastoreServiceGrpc.getDeleteHiveDatabaseMethod) + == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getDeleteHiveDatabaseMethod = HiveMetastoreServiceGrpc.getDeleteHiveDatabaseMethod) + == null) { + HiveMetastoreServiceGrpc.getDeleteHiveDatabaseMethod = + getDeleteHiveDatabaseMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeleteHiveDatabase")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("DeleteHiveDatabase")) + .build(); + } + } + } + return getDeleteHiveDatabaseMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable> + getCreateHiveTableMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateHiveTable", + requestType = com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.HiveTable.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable> + getCreateHiveTableMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable> + getCreateHiveTableMethod; + if ((getCreateHiveTableMethod = HiveMetastoreServiceGrpc.getCreateHiveTableMethod) == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getCreateHiveTableMethod = HiveMetastoreServiceGrpc.getCreateHiveTableMethod) + == null) { + HiveMetastoreServiceGrpc.getCreateHiveTableMethod = + getCreateHiveTableMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateHiveTable")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("CreateHiveTable")) + .build(); + } + } + } + return getCreateHiveTableMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable> + getGetHiveTableMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetHiveTable", + requestType = com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.HiveTable.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable> + getGetHiveTableMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable> + getGetHiveTableMethod; + if ((getGetHiveTableMethod = HiveMetastoreServiceGrpc.getGetHiveTableMethod) == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getGetHiveTableMethod = HiveMetastoreServiceGrpc.getGetHiveTableMethod) == null) { + HiveMetastoreServiceGrpc.getGetHiveTableMethod = + getGetHiveTableMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetHiveTable")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("GetHiveTable")) + .build(); + } + } + } + return getGetHiveTableMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse> + getListHiveTablesMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListHiveTables", + requestType = com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse> + getListHiveTablesMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse> + getListHiveTablesMethod; + if ((getListHiveTablesMethod = HiveMetastoreServiceGrpc.getListHiveTablesMethod) == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getListHiveTablesMethod = HiveMetastoreServiceGrpc.getListHiveTablesMethod) == null) { + HiveMetastoreServiceGrpc.getListHiveTablesMethod = + getListHiveTablesMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListHiveTables")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("ListHiveTables")) + .build(); + } + } + } + return getListHiveTablesMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable> + getUpdateHiveTableMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateHiveTable", + requestType = com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest.class, + responseType = com.google.cloud.biglake.hive.v1beta.HiveTable.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable> + getUpdateHiveTableMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable> + getUpdateHiveTableMethod; + if ((getUpdateHiveTableMethod = HiveMetastoreServiceGrpc.getUpdateHiveTableMethod) == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getUpdateHiveTableMethod = HiveMetastoreServiceGrpc.getUpdateHiveTableMethod) + == null) { + HiveMetastoreServiceGrpc.getUpdateHiveTableMethod = + getUpdateHiveTableMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UpdateHiveTable")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance())) + .setSchemaDescriptor( + new HiveMetastoreServiceMethodDescriptorSupplier("UpdateHiveTable")) + .build(); + } + } + } + return getUpdateHiveTableMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest, com.google.protobuf.Empty> + getDeleteHiveTableMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteHiveTable", + requestType = com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest, com.google.protobuf.Empty> + getDeleteHiveTableMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest, com.google.protobuf.Empty> + getDeleteHiveTableMethod; + if ((getDeleteHiveTableMethod = HiveMetastoreServiceGrpc.getDeleteHiveTableMethod) == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + if ((getDeleteHiveTableMethod = HiveMetastoreServiceGrpc.getDeleteHiveTableMethod) + == null) { + HiveMetastoreServiceGrpc.getDeleteHiveTableMethod = + getDeleteHiveTableMethod = + io.grpc.MethodDescriptor + . 
+                          newBuilder()
+                      .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
+                      .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeleteHiveTable"))
+                      .setSampledToLocalTracing(true)
+                      .setRequestMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest
+                                  .getDefaultInstance()))
+                      .setResponseMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.protobuf.Empty.getDefaultInstance()))
+                      .setSchemaDescriptor(
+                          new HiveMetastoreServiceMethodDescriptorSupplier("DeleteHiveTable"))
+                      .build();
+        }
+      }
+    }
+    return getDeleteHiveTableMethod;
+  }
+
+  // Descriptor for the unary BatchCreatePartitions RPC; built lazily via
+  // double-checked locking on this volatile field (standard generated pattern).
+  private static volatile io.grpc.MethodDescriptor<
+          com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest,
+          com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse>
+      getBatchCreatePartitionsMethod;
+
+  @io.grpc.stub.annotations.RpcMethod(
+      fullMethodName = SERVICE_NAME + '/' + "BatchCreatePartitions",
+      requestType = com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest.class,
+      responseType = com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse.class,
+      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
+  public static io.grpc.MethodDescriptor<
+          com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest,
+          com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse>
+      getBatchCreatePartitionsMethod() {
+    io.grpc.MethodDescriptor<
+            com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest,
+            com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse>
+        getBatchCreatePartitionsMethod;
+    if ((getBatchCreatePartitionsMethod = HiveMetastoreServiceGrpc.getBatchCreatePartitionsMethod)
+        == null) {
+      synchronized (HiveMetastoreServiceGrpc.class) {
+        if ((getBatchCreatePartitionsMethod =
+                HiveMetastoreServiceGrpc.getBatchCreatePartitionsMethod)
+            == null) {
+          HiveMetastoreServiceGrpc.getBatchCreatePartitionsMethod =
+              getBatchCreatePartitionsMethod =
+                  io.grpc.MethodDescriptor
+                      .<com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest,
+                          com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse>
+                          newBuilder()
+                      .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
+                      .setFullMethodName(
+                          generateFullMethodName(SERVICE_NAME, "BatchCreatePartitions"))
+                      .setSampledToLocalTracing(true)
+                      .setRequestMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest
+                                  .getDefaultInstance()))
+                      .setResponseMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse
+                                  .getDefaultInstance()))
+                      .setSchemaDescriptor(
+                          new HiveMetastoreServiceMethodDescriptorSupplier("BatchCreatePartitions"))
+                      .build();
+        }
+      }
+    }
+    return getBatchCreatePartitionsMethod;
+  }
+
+  // Descriptor for the unary BatchDeletePartitions RPC (response is Empty).
+  private static volatile io.grpc.MethodDescriptor<
+          com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest,
+          com.google.protobuf.Empty>
+      getBatchDeletePartitionsMethod;
+
+  @io.grpc.stub.annotations.RpcMethod(
+      fullMethodName = SERVICE_NAME + '/' + "BatchDeletePartitions",
+      requestType = com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest.class,
+      responseType = com.google.protobuf.Empty.class,
+      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
+  public static io.grpc.MethodDescriptor<
+          com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest,
+          com.google.protobuf.Empty>
+      getBatchDeletePartitionsMethod() {
+    io.grpc.MethodDescriptor<
+            com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest,
+            com.google.protobuf.Empty>
+        getBatchDeletePartitionsMethod;
+    if ((getBatchDeletePartitionsMethod = HiveMetastoreServiceGrpc.getBatchDeletePartitionsMethod)
+        == null) {
+      synchronized (HiveMetastoreServiceGrpc.class) {
+        if ((getBatchDeletePartitionsMethod =
+                HiveMetastoreServiceGrpc.getBatchDeletePartitionsMethod)
+            == null) {
+          HiveMetastoreServiceGrpc.getBatchDeletePartitionsMethod =
+              getBatchDeletePartitionsMethod =
+                  io.grpc.MethodDescriptor
+                      .<com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest,
+                          com.google.protobuf.Empty>
+                          newBuilder()
+                      .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
+                      .setFullMethodName(
+                          generateFullMethodName(SERVICE_NAME, "BatchDeletePartitions"))
+                      .setSampledToLocalTracing(true)
+                      .setRequestMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest
+                                  .getDefaultInstance()))
+                      .setResponseMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.protobuf.Empty.getDefaultInstance()))
+                      .setSchemaDescriptor(
+                          new HiveMetastoreServiceMethodDescriptorSupplier("BatchDeletePartitions"))
+                      .build();
+        }
+      }
+    }
+    return getBatchDeletePartitionsMethod;
+  }
+
+  // Descriptor for the unary BatchUpdatePartitions RPC.
+  private static volatile io.grpc.MethodDescriptor<
+          com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest,
+          com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse>
+      getBatchUpdatePartitionsMethod;
+
+  @io.grpc.stub.annotations.RpcMethod(
+      fullMethodName = SERVICE_NAME + '/' + "BatchUpdatePartitions",
+      requestType = com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest.class,
+      responseType = com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse.class,
+      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
+  public static io.grpc.MethodDescriptor<
+          com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest,
+          com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse>
+      getBatchUpdatePartitionsMethod() {
+    io.grpc.MethodDescriptor<
+            com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest,
+            com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse>
+        getBatchUpdatePartitionsMethod;
+    if ((getBatchUpdatePartitionsMethod = HiveMetastoreServiceGrpc.getBatchUpdatePartitionsMethod)
+        == null) {
+      synchronized (HiveMetastoreServiceGrpc.class) {
+        if ((getBatchUpdatePartitionsMethod =
+                HiveMetastoreServiceGrpc.getBatchUpdatePartitionsMethod)
+            == null) {
+          HiveMetastoreServiceGrpc.getBatchUpdatePartitionsMethod =
+              getBatchUpdatePartitionsMethod =
+                  io.grpc.MethodDescriptor
+                      .<com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest,
+                          com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse>
+                          newBuilder()
+                      .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
+                      .setFullMethodName(
+                          generateFullMethodName(SERVICE_NAME, "BatchUpdatePartitions"))
+                      .setSampledToLocalTracing(true)
+                      .setRequestMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest
+                                  .getDefaultInstance()))
+                      .setResponseMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse
+                                  .getDefaultInstance()))
+                      .setSchemaDescriptor(
+                          new HiveMetastoreServiceMethodDescriptorSupplier("BatchUpdatePartitions"))
+                      .build();
+        }
+      }
+    }
+    return getBatchUpdatePartitionsMethod;
+  }
+
+  // Descriptor for ListPartitions; note this RPC is SERVER_STREAMING, unlike the
+  // unary methods above.
+  private static volatile io.grpc.MethodDescriptor<
+          com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest,
+          com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse>
+      getListPartitionsMethod;
+
+  @io.grpc.stub.annotations.RpcMethod(
+      fullMethodName = SERVICE_NAME + '/' + "ListPartitions",
+      requestType = com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest.class,
+      responseType = com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse.class,
+      methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING)
+  public static io.grpc.MethodDescriptor<
+          com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest,
+          com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse>
+      getListPartitionsMethod() {
+    io.grpc.MethodDescriptor<
+            com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest,
+            com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse>
+        getListPartitionsMethod;
+    if ((getListPartitionsMethod = HiveMetastoreServiceGrpc.getListPartitionsMethod) == null) {
+      synchronized (HiveMetastoreServiceGrpc.class) {
+        if ((getListPartitionsMethod = HiveMetastoreServiceGrpc.getListPartitionsMethod) == null) {
+          HiveMetastoreServiceGrpc.getListPartitionsMethod =
+              getListPartitionsMethod =
+                  io.grpc.MethodDescriptor
+                      .<com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest,
+                          com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse>
+                          newBuilder()
+                      .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING)
+                      .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListPartitions"))
+                      .setSampledToLocalTracing(true)
+                      .setRequestMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest
+                                  .getDefaultInstance()))
+                      .setResponseMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse
+                                  .getDefaultInstance()))
+                      .setSchemaDescriptor(
+                          new HiveMetastoreServiceMethodDescriptorSupplier("ListPartitions"))
+                      .build();
+        }
+      }
+    }
+    return getListPartitionsMethod;
+  }
+
+  /** Creates a new async stub that supports all call types for the service */
+  public static HiveMetastoreServiceStub newStub(io.grpc.Channel channel) {
+    io.grpc.stub.AbstractStub.StubFactory<HiveMetastoreServiceStub> factory =
+        new io.grpc.stub.AbstractStub.StubFactory<HiveMetastoreServiceStub>() {
+          @java.lang.Override
+          public HiveMetastoreServiceStub newStub(
+              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
+            return new HiveMetastoreServiceStub(channel, callOptions);
+          }
+        };
+    return HiveMetastoreServiceStub.newStub(factory, channel);
+  }
+
+  /** Creates a new blocking-style stub that supports all types of calls on the service */
+  public static HiveMetastoreServiceBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) {
+    io.grpc.stub.AbstractStub.StubFactory<HiveMetastoreServiceBlockingV2Stub> factory =
+        new io.grpc.stub.AbstractStub.StubFactory<HiveMetastoreServiceBlockingV2Stub>() {
+          @java.lang.Override
+          public HiveMetastoreServiceBlockingV2Stub newStub(
+              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
+            return new HiveMetastoreServiceBlockingV2Stub(channel, callOptions);
+          }
+        };
+    return HiveMetastoreServiceBlockingV2Stub.newStub(factory, channel);
+  }
+
+  /**
+   * Creates a new blocking-style stub that supports unary and streaming output calls on the service
+   */
+  public static HiveMetastoreServiceBlockingStub newBlockingStub(io.grpc.Channel channel) {
+    io.grpc.stub.AbstractStub.StubFactory<HiveMetastoreServiceBlockingStub> factory =
+        new io.grpc.stub.AbstractStub.StubFactory<HiveMetastoreServiceBlockingStub>() {
+          @java.lang.Override
+          public HiveMetastoreServiceBlockingStub newStub(
+              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
+            return new HiveMetastoreServiceBlockingStub(channel, callOptions);
+          }
+        };
+    return HiveMetastoreServiceBlockingStub.newStub(factory, channel);
+  }
+
+  /** Creates a new ListenableFuture-style stub that supports unary calls on the service */
+  public static HiveMetastoreServiceFutureStub newFutureStub(io.grpc.Channel channel) {
+    io.grpc.stub.AbstractStub.StubFactory<HiveMetastoreServiceFutureStub> factory =
+        new io.grpc.stub.AbstractStub.StubFactory<HiveMetastoreServiceFutureStub>() {
+          @java.lang.Override
+          public HiveMetastoreServiceFutureStub newStub(
+              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
+            return new HiveMetastoreServiceFutureStub(channel, callOptions);
+          }
+        };
+    return HiveMetastoreServiceFutureStub.newStub(factory, channel);
+  }
+
+  /**
+   *
+   *
+   * <pre>
+   * Hive Metastore Service is a BigLake service that allows users to manage
+   * their external Hive catalogs. Full API compatibility with OSS Hive Metastore
+   * APIs is not supported; the methods mostly match the Hive Metastore API spec,
+   * with a few exceptions.
+   * These include listing resources with pattern,
+   * environment context which are combined in a single List API, return of
+   * ListResponse object instead of a list of resources, transactions, locks, etc.
+   * The BigLake Hive Metastore API defines the following resources:
+   * * A collection of Google Cloud projects: `/projects/*`
+   * * Each project has a collection of catalogs: `/catalogs/*`
+   * * Each catalog has a collection of databases: `/databases/*`
+   * * Each database has a collection of tables: `/tables/*`
+   * 
+   */
+  // Server-side contract: each default method replies UNIMPLEMENTED until overridden.
+  public interface AsyncService {
+
+    /**
+     *
+     *
+     * <pre>
+     * Creates a new hive catalog.
+     * </pre>
+     */
+    default void createHiveCatalog(
+        com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveCatalog>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getCreateHiveCatalogMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Gets the catalog specified by the resource name.
+     * </pre>
+     */
+    default void getHiveCatalog(
+        com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveCatalog>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getGetHiveCatalogMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * List all catalogs in a specified project.
+     * </pre>
+     */
+    default void listHiveCatalogs(
+        com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getListHiveCatalogsMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Updates an existing catalog.
+     * </pre>
+     */
+    default void updateHiveCatalog(
+        com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveCatalog>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getUpdateHiveCatalogMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Deletes an existing catalog specified by the catalog ID. Delete will fail
+     * if the catalog is not empty.
+     * </pre>
+     */
+    default void deleteHiveCatalog(
+        com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest request,
+        io.grpc.stub.StreamObserver<com.google.protobuf.Empty> responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getDeleteHiveCatalogMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Creates a new database.
+     * </pre>
+     */
+    default void createHiveDatabase(
+        com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveDatabase>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getCreateHiveDatabaseMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Gets the database specified by the resource name.
+     * </pre>
+     */
+    default void getHiveDatabase(
+        com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveDatabase>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getGetHiveDatabaseMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * List all databases in a specified catalog.
+     * </pre>
+     */
+    default void listHiveDatabases(
+        com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getListHiveDatabasesMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Updates an existing database specified by the database name.
+     * </pre>
+     */
+    default void updateHiveDatabase(
+        com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveDatabase>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getUpdateHiveDatabaseMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Deletes an existing database specified by the database name.
+     * </pre>
+     */
+    default void deleteHiveDatabase(
+        com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest request,
+        io.grpc.stub.StreamObserver<com.google.protobuf.Empty> responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getDeleteHiveDatabaseMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Creates a new hive table.
+     * </pre>
+     */
+    default void createHiveTable(
+        com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveTable>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getCreateHiveTableMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Gets the table specified by the resource name.
+     * </pre>
+     */
+    default void getHiveTable(
+        com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveTable>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getGetHiveTableMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * List all hive tables in a specified project under the hive catalog and
+     * database.
+     * </pre>
+     */
+    default void listHiveTables(
+        com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getListHiveTablesMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Updates an existing table specified by the table name.
+     * </pre>
+     */
+    default void updateHiveTable(
+        com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveTable>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getUpdateHiveTableMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Deletes an existing table specified by the table name.
+     * </pre>
+     */
+    default void deleteHiveTable(
+        com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest request,
+        io.grpc.stub.StreamObserver<com.google.protobuf.Empty> responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getDeleteHiveTableMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Adds partitions to a table.
+     * </pre>
+     */
+    default void batchCreatePartitions(
+        com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest request,
+        io.grpc.stub.StreamObserver<
+                com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getBatchCreatePartitionsMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Deletes partitions from a table.
+     * </pre>
+     */
+    default void batchDeletePartitions(
+        com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest request,
+        io.grpc.stub.StreamObserver<com.google.protobuf.Empty> responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getBatchDeletePartitionsMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Updates partitions in a table.
+     * </pre>
+     */
+    default void batchUpdatePartitions(
+        com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest request,
+        io.grpc.stub.StreamObserver<
+                com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getBatchUpdatePartitionsMethod(), responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Streams list of partitions from a table.
+     * </pre>
+     */
+    // Server-streaming RPC; a real implementation may call onNext repeatedly
+    // before onCompleted.
+    default void listPartitions(
+        com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse>
+            responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+          getListPartitionsMethod(), responseObserver);
+    }
+  }
+
+  /**
+   * Base class for the server implementation of the service HiveMetastoreService.
+   *
+   * <pre>
+   * Hive Metastore Service is a BigLake service that allows users to manage
+   * their external Hive catalogs. Full API compatibility with OSS Hive Metastore
+   * APIs is not supported; the methods mostly match the Hive Metastore API spec,
+   * with a few exceptions.
+   * These include listing resources with pattern,
+   * environment context which are combined in a single List API, return of
+   * ListResponse object instead of a list of resources, transactions, locks, etc.
+   * The BigLake Hive Metastore API defines the following resources:
+   * * A collection of Google Cloud projects: `/projects/*`
+   * * Each project has a collection of catalogs: `/catalogs/*`
+   * * Each catalog has a collection of databases: `/databases/*`
+   * * Each database has a collection of tables: `/tables/*`
+   * 
+   */
+  // Convenience base class: inherits the UNIMPLEMENTED default handlers from
+  // AsyncService and delegates binding to the class-level bindService helper.
+  public abstract static class HiveMetastoreServiceImplBase
+      implements io.grpc.BindableService, AsyncService {
+
+    @java.lang.Override
+    public final io.grpc.ServerServiceDefinition bindService() {
+      return HiveMetastoreServiceGrpc.bindService(this);
+    }
+  }
+
+  /**
+   * A stub to allow clients to do asynchronous rpc calls to service HiveMetastoreService.
+   *
+   * <pre>
+   * Hive Metastore Service is a BigLake service that allows users to manage
+   * their external Hive catalogs. Full API compatibility with OSS Hive Metastore
+   * APIs is not supported; the methods mostly match the Hive Metastore API spec,
+   * with a few exceptions.
+   * These include listing resources with pattern,
+   * environment context which are combined in a single List API, return of
+   * ListResponse object instead of a list of resources, transactions, locks, etc.
+   * The BigLake Hive Metastore API defines the following resources:
+   * * A collection of Google Cloud projects: `/projects/*`
+   * * Each project has a collection of catalogs: `/catalogs/*`
+   * * Each catalog has a collection of databases: `/databases/*`
+   * * Each database has a collection of tables: `/tables/*`
+   * 
+   */
+  // Async client stub: every method issues a non-blocking call on the channel and
+  // reports the result through the caller-supplied StreamObserver.
+  public static final class HiveMetastoreServiceStub
+      extends io.grpc.stub.AbstractAsyncStub<HiveMetastoreServiceStub> {
+    private HiveMetastoreServiceStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
+      super(channel, callOptions);
+    }
+
+    @java.lang.Override
+    protected HiveMetastoreServiceStub build(
+        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
+      return new HiveMetastoreServiceStub(channel, callOptions);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Creates a new hive catalog.
+     * </pre>
+     */
+    public void createHiveCatalog(
+        com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveCatalog>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getCreateHiveCatalogMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Gets the catalog specified by the resource name.
+     * </pre>
+     */
+    public void getHiveCatalog(
+        com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveCatalog>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getGetHiveCatalogMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * List all catalogs in a specified project.
+     * </pre>
+     */
+    public void listHiveCatalogs(
+        com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getListHiveCatalogsMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Updates an existing catalog.
+     * </pre>
+     */
+    public void updateHiveCatalog(
+        com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveCatalog>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getUpdateHiveCatalogMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Deletes an existing catalog specified by the catalog ID. Delete will fail
+     * if the catalog is not empty.
+     * </pre>
+     */
+    public void deleteHiveCatalog(
+        com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest request,
+        io.grpc.stub.StreamObserver<com.google.protobuf.Empty> responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getDeleteHiveCatalogMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Creates a new database.
+     * </pre>
+     */
+    public void createHiveDatabase(
+        com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveDatabase>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getCreateHiveDatabaseMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Gets the database specified by the resource name.
+     * </pre>
+     */
+    public void getHiveDatabase(
+        com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveDatabase>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getGetHiveDatabaseMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * List all databases in a specified catalog.
+     * </pre>
+     */
+    public void listHiveDatabases(
+        com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getListHiveDatabasesMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Updates an existing database specified by the database name.
+     * </pre>
+     */
+    public void updateHiveDatabase(
+        com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveDatabase>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getUpdateHiveDatabaseMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Deletes an existing database specified by the database name.
+     * </pre>
+     */
+    public void deleteHiveDatabase(
+        com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest request,
+        io.grpc.stub.StreamObserver<com.google.protobuf.Empty> responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getDeleteHiveDatabaseMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Creates a new hive table.
+     * </pre>
+     */
+    public void createHiveTable(
+        com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveTable>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getCreateHiveTableMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Gets the table specified by the resource name.
+     * </pre>
+     */
+    public void getHiveTable(
+        com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveTable>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getGetHiveTableMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * List all hive tables in a specified project under the hive catalog and
+     * database.
+     * </pre>
+     */
+    public void listHiveTables(
+        com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getListHiveTablesMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Updates an existing table specified by the table name.
+     * </pre>
+     */
+    public void updateHiveTable(
+        com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.HiveTable>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getUpdateHiveTableMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Deletes an existing table specified by the table name.
+     * </pre>
+     */
+    public void deleteHiveTable(
+        com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest request,
+        io.grpc.stub.StreamObserver<com.google.protobuf.Empty> responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getDeleteHiveTableMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Adds partitions to a table.
+     * </pre>
+     */
+    public void batchCreatePartitions(
+        com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest request,
+        io.grpc.stub.StreamObserver<
+                com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getBatchCreatePartitionsMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Deletes partitions from a table.
+     * </pre>
+     */
+    public void batchDeletePartitions(
+        com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest request,
+        io.grpc.stub.StreamObserver<com.google.protobuf.Empty> responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getBatchDeletePartitionsMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Updates partitions in a table.
+     * </pre>
+     */
+    public void batchUpdatePartitions(
+        com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest request,
+        io.grpc.stub.StreamObserver<
+                com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getBatchUpdatePartitionsMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Streams list of partitions from a table.
+     * </pre>
+     */
+    // Uses asyncServerStreamingCall (not asyncUnaryCall): the observer may
+    // receive multiple responses before completion.
+    public void listPartitions(
+        com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse>
+            responseObserver) {
+      io.grpc.stub.ClientCalls.asyncServerStreamingCall(
+          getChannel().newCall(getListPartitionsMethod(), getCallOptions()),
+          request,
+          responseObserver);
+    }
+  }
+
+  /**
+   * A stub to allow clients to do synchronous rpc calls to service HiveMetastoreService.
+   *
+   * <pre>
+   * Hive Metastore Service is a BigLake service that allows users to manage
+   * their external Hive catalogs. Full API compatibility with OSS Hive Metastore
+   * APIs is not supported; the methods mostly match the Hive Metastore API spec,
+   * with a few exceptions.
+   * These include listing resources with pattern,
+   * environment context which are combined in a single List API, return of
+   * ListResponse object instead of a list of resources, transactions, locks, etc.
+   * The BigLake Hive Metastore API defines the following resources:
+   * * A collection of Google Cloud projects: `/projects/*`
+   * * Each project has a collection of catalogs: `/catalogs/*`
+   * * Each catalog has a collection of databases: `/databases/*`
+   * * Each database has a collection of tables: `/tables/*`
+   * 
+ */ + public static final class HiveMetastoreServiceBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private HiveMetastoreServiceBlockingV2Stub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected HiveMetastoreServiceBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new HiveMetastoreServiceBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new hive catalog.
+     * 
+ */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog createHiveCatalog( + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateHiveCatalogMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets the catalog specified by the resource name.
+     * 
+ */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog getHiveCatalog( + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetHiveCatalogMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * List all catalogs in a specified project.
+     * 
+ */ + public com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse listHiveCatalogs( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListHiveCatalogsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates an existing catalog.
+     * 
+ */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog updateHiveCatalog( + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateHiveCatalogMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes an existing catalog specified by the catalog ID. Delete will fail
+     * if the catalog is not empty.
+     * 
     */
    public com.google.protobuf.Empty deleteHiveCatalog(
        com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getDeleteHiveCatalogMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Creates a new database.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveDatabase createHiveDatabase(
        com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getCreateHiveDatabaseMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Gets the database specified by the resource name.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveDatabase getHiveDatabase(
        com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getGetHiveDatabaseMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * List all databases in a specified catalog.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse listHiveDatabases(
        com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getListHiveDatabasesMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Updates an existing database specified by the database name.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveDatabase updateHiveDatabase(
        com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getUpdateHiveDatabaseMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Deletes an existing database specified by the database name.
     * </pre>
     */
    public com.google.protobuf.Empty deleteHiveDatabase(
        com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getDeleteHiveDatabaseMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Creates a new hive table.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveTable createHiveTable(
        com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getCreateHiveTableMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Gets the table specified by the resource name.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveTable getHiveTable(
        com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getGetHiveTableMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * List all hive tables in a specified project under the hive catalog and
     * database.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse listHiveTables(
        com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getListHiveTablesMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Updates an existing table specified by the table name.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveTable updateHiveTable(
        com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getUpdateHiveTableMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Deletes an existing table specified by the table name.
     * </pre>
     */
    public com.google.protobuf.Empty deleteHiveTable(
        com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getDeleteHiveTableMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Adds partitions to a table.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse batchCreatePartitions(
        com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getBatchCreatePartitionsMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Deletes partitions from a table.
     * </pre>
     */
    public com.google.protobuf.Empty batchDeletePartitions(
        com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getBatchDeletePartitionsMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Updates partitions in a table.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse batchUpdatePartitions(
        com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest request)
        throws io.grpc.StatusException {
      return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
          getChannel(), getBatchUpdatePartitionsMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Streams list of partitions from a table.
     * </pre>
     */
    @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918")
    public io.grpc.stub.BlockingClientCall<
            ?, com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse>
        listPartitions(com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest request) {
      return io.grpc.stub.ClientCalls.blockingV2ServerStreamingCall(
          getChannel(), getListPartitionsMethod(), getCallOptions(), request);
    }
  }

  /**
   * A stub to allow clients to do limited synchronous rpc calls to service HiveMetastoreService.
   *
   * <pre>
   * Hive Metastore Service is a biglake service that allows users to manage
   * their external Hive catalogs. Full API compatibility with OSS Hive Metastore
   * APIs is not supported. The methods match the Hive Metastore API spec mostly
   * except for a few exceptions.
   * These include listing resources with pattern,
   * environment context which are combined in a single List API, return of
   * ListResponse object instead of a list of resources, transactions, locks, etc.
   * The BigLake Hive Metastore API defines the following resources:
   * * A collection of Google Cloud projects: `/projects/*`
   * * Each project has a collection of catalogs: `/catalogs/*`
   * * Each catalog has a collection of databases: `/databases/*`
   * * Each database has a collection of tables: `/tables/*`
   * </pre>
   */
  public static final class HiveMetastoreServiceBlockingStub
      extends io.grpc.stub.AbstractBlockingStub<HiveMetastoreServiceBlockingStub> {
    private HiveMetastoreServiceBlockingStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected HiveMetastoreServiceBlockingStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new HiveMetastoreServiceBlockingStub(channel, callOptions);
    }

    /**
     * <pre>
     * Creates a new hive catalog.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveCatalog createHiveCatalog(
        com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getCreateHiveCatalogMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Gets the catalog specified by the resource name.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveCatalog getHiveCatalog(
        com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getGetHiveCatalogMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * List all catalogs in a specified project.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse listHiveCatalogs(
        com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getListHiveCatalogsMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Updates an existing catalog.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveCatalog updateHiveCatalog(
        com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getUpdateHiveCatalogMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Deletes an existing catalog specified by the catalog ID. Delete will fail
     * if the catalog is not empty.
     * </pre>
     */
    public com.google.protobuf.Empty deleteHiveCatalog(
        com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getDeleteHiveCatalogMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Creates a new database.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveDatabase createHiveDatabase(
        com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getCreateHiveDatabaseMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Gets the database specified by the resource name.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveDatabase getHiveDatabase(
        com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getGetHiveDatabaseMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * List all databases in a specified catalog.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse listHiveDatabases(
        com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getListHiveDatabasesMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Updates an existing database specified by the database name.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveDatabase updateHiveDatabase(
        com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getUpdateHiveDatabaseMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Deletes an existing database specified by the database name.
     * </pre>
     */
    public com.google.protobuf.Empty deleteHiveDatabase(
        com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getDeleteHiveDatabaseMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Creates a new hive table.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveTable createHiveTable(
        com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getCreateHiveTableMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Gets the table specified by the resource name.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveTable getHiveTable(
        com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getGetHiveTableMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * List all hive tables in a specified project under the hive catalog and
     * database.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse listHiveTables(
        com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getListHiveTablesMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Updates an existing table specified by the table name.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.HiveTable updateHiveTable(
        com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getUpdateHiveTableMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Deletes an existing table specified by the table name.
     * </pre>
     */
    public com.google.protobuf.Empty deleteHiveTable(
        com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getDeleteHiveTableMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Adds partitions to a table.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse batchCreatePartitions(
        com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getBatchCreatePartitionsMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Deletes partitions from a table.
     * </pre>
     */
    public com.google.protobuf.Empty batchDeletePartitions(
        com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getBatchDeletePartitionsMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Updates partitions in a table.
     * </pre>
     */
    public com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse batchUpdatePartitions(
        com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getBatchUpdatePartitionsMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Streams list of partitions from a table.
     * </pre>
     */
    public java.util.Iterator<com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse>
        listPartitions(com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest request) {
      return io.grpc.stub.ClientCalls.blockingServerStreamingCall(
          getChannel(), getListPartitionsMethod(), getCallOptions(), request);
    }
  }

  /**
   * A stub to allow clients to do ListenableFuture-style rpc calls to service HiveMetastoreService.
   *
   * <pre>
   * Hive Metastore Service is a biglake service that allows users to manage
   * their external Hive catalogs. Full API compatibility with OSS Hive Metastore
   * APIs is not supported. The methods match the Hive Metastore API spec mostly
   * except for a few exceptions.
   * These include listing resources with pattern,
   * environment context which are combined in a single List API, return of
   * ListResponse object instead of a list of resources, transactions, locks, etc.
   * The BigLake Hive Metastore API defines the following resources:
   * * A collection of Google Cloud projects: `/projects/*`
   * * Each project has a collection of catalogs: `/catalogs/*`
   * * Each catalog has a collection of databases: `/databases/*`
   * * Each database has a collection of tables: `/tables/*`
   * </pre>
   */
  public static final class HiveMetastoreServiceFutureStub
      extends io.grpc.stub.AbstractFutureStub<HiveMetastoreServiceFutureStub> {
    private HiveMetastoreServiceFutureStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected HiveMetastoreServiceFutureStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new HiveMetastoreServiceFutureStub(channel, callOptions);
    }

    /**
     * <pre>
     * Creates a new hive catalog.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.HiveCatalog>
        createHiveCatalog(com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getCreateHiveCatalogMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Gets the catalog specified by the resource name.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.HiveCatalog>
        getHiveCatalog(com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getGetHiveCatalogMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * List all catalogs in a specified project.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse>
        listHiveCatalogs(com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getListHiveCatalogsMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Updates an existing catalog.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.HiveCatalog>
        updateHiveCatalog(com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getUpdateHiveCatalogMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Deletes an existing catalog specified by the catalog ID. Delete will fail
     * if the catalog is not empty.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<com.google.protobuf.Empty>
        deleteHiveCatalog(com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getDeleteHiveCatalogMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Creates a new database.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.HiveDatabase>
        createHiveDatabase(com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getCreateHiveDatabaseMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Gets the database specified by the resource name.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.HiveDatabase>
        getHiveDatabase(com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getGetHiveDatabaseMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * List all databases in a specified catalog.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse>
        listHiveDatabases(com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getListHiveDatabasesMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Updates an existing database specified by the database name.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.HiveDatabase>
        updateHiveDatabase(com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getUpdateHiveDatabaseMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Deletes an existing database specified by the database name.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<com.google.protobuf.Empty>
        deleteHiveDatabase(com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getDeleteHiveDatabaseMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Creates a new hive table.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.HiveTable>
        createHiveTable(com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getCreateHiveTableMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Gets the table specified by the resource name.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.HiveTable>
        getHiveTable(com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getGetHiveTableMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * List all hive tables in a specified project under the hive catalog and
     * database.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse>
        listHiveTables(com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getListHiveTablesMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Updates an existing table specified by the table name.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.HiveTable>
        updateHiveTable(com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getUpdateHiveTableMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Deletes an existing table specified by the table name.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<com.google.protobuf.Empty>
        deleteHiveTable(com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getDeleteHiveTableMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Adds partitions to a table.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse>
        batchCreatePartitions(
            com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getBatchCreatePartitionsMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Deletes partitions from a table.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<com.google.protobuf.Empty>
        batchDeletePartitions(
            com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getBatchDeletePartitionsMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Updates partitions in a table.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse>
        batchUpdatePartitions(
            com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getBatchUpdatePartitionsMethod(), getCallOptions()), request);
    }
  }

  // Dispatch ids used by MethodHandlers.invoke() to route a call to the matching
  // AsyncService method; one id per RPC defined on HiveMetastoreService.
  private static final int METHODID_CREATE_HIVE_CATALOG = 0;
  private static final int METHODID_GET_HIVE_CATALOG = 1;
  private static final int METHODID_LIST_HIVE_CATALOGS = 2;
  private static final int METHODID_UPDATE_HIVE_CATALOG = 3;
  private static final int METHODID_DELETE_HIVE_CATALOG = 4;
  private static final int METHODID_CREATE_HIVE_DATABASE = 5;
  private static final int METHODID_GET_HIVE_DATABASE = 6;
  private static final int METHODID_LIST_HIVE_DATABASES = 7;
  private static final int METHODID_UPDATE_HIVE_DATABASE = 8;
  private static final int METHODID_DELETE_HIVE_DATABASE = 9;
  private static final int METHODID_CREATE_HIVE_TABLE = 10;
  private static final int METHODID_GET_HIVE_TABLE = 11;
  private static final int METHODID_LIST_HIVE_TABLES = 12;
  private static final int METHODID_UPDATE_HIVE_TABLE = 13;
  private static final int METHODID_DELETE_HIVE_TABLE = 14;
  private static final int METHODID_BATCH_CREATE_PARTITIONS = 15;
  private static final int METHODID_BATCH_DELETE_PARTITIONS = 16;
  private static final int METHODID_BATCH_UPDATE_PARTITIONS = 17;
  private static final int METHODID_LIST_PARTITIONS = 18;

  // Bridges gRPC's generic server-call handler interfaces to the typed AsyncService
  // implementation: each instance is bound to one methodId and forwards the request
  // and response observer to the corresponding service method.
  private static final class MethodHandlers<Req, Resp>
      implements io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
          io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
          io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
          io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
    private final AsyncService serviceImpl;
    private final int methodId;

    MethodHandlers(AsyncService serviceImpl, int methodId) {
      this.serviceImpl = serviceImpl;
      this.methodId = methodId;
    }
@java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_HIVE_CATALOG: + serviceImpl.createHiveCatalog( + (com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_HIVE_CATALOG: + serviceImpl.getHiveCatalog( + (com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_LIST_HIVE_CATALOGS: + serviceImpl.listHiveCatalogs( + (com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse>) + responseObserver); + break; + case METHODID_UPDATE_HIVE_CATALOG: + serviceImpl.updateHiveCatalog( + (com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_DELETE_HIVE_CATALOG: + serviceImpl.deleteHiveCatalog( + (com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_CREATE_HIVE_DATABASE: + serviceImpl.createHiveDatabase( + (com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_HIVE_DATABASE: + serviceImpl.getHiveDatabase( + (com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_LIST_HIVE_DATABASES: + serviceImpl.listHiveDatabases( + (com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse>) + responseObserver); + break; + case METHODID_UPDATE_HIVE_DATABASE: + 
serviceImpl.updateHiveDatabase( + (com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_DELETE_HIVE_DATABASE: + serviceImpl.deleteHiveDatabase( + (com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_CREATE_HIVE_TABLE: + serviceImpl.createHiveTable( + (com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_HIVE_TABLE: + serviceImpl.getHiveTable( + (com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_LIST_HIVE_TABLES: + serviceImpl.listHiveTables( + (com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse>) + responseObserver); + break; + case METHODID_UPDATE_HIVE_TABLE: + serviceImpl.updateHiveTable( + (com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_DELETE_HIVE_TABLE: + serviceImpl.deleteHiveTable( + (com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_BATCH_CREATE_PARTITIONS: + serviceImpl.batchCreatePartitions( + (com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse>) + responseObserver); + break; + case METHODID_BATCH_DELETE_PARTITIONS: + serviceImpl.batchDeletePartitions( + (com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_BATCH_UPDATE_PARTITIONS: + 
serviceImpl.batchUpdatePartitions( + (com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse>) + responseObserver); + break; + case METHODID_LIST_PARTITIONS: + serviceImpl.listPartitions( + (com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse>) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateHiveCatalogMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest, + com.google.cloud.biglake.hive.v1beta.HiveCatalog>( + service, METHODID_CREATE_HIVE_CATALOG))) + .addMethod( + getGetHiveCatalogMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest, + com.google.cloud.biglake.hive.v1beta.HiveCatalog>( + service, METHODID_GET_HIVE_CATALOG))) + .addMethod( + getListHiveCatalogsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse>( + service, METHODID_LIST_HIVE_CATALOGS))) + .addMethod( + getUpdateHiveCatalogMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest, + 
com.google.cloud.biglake.hive.v1beta.HiveCatalog>( + service, METHODID_UPDATE_HIVE_CATALOG))) + .addMethod( + getDeleteHiveCatalogMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest, + com.google.protobuf.Empty>(service, METHODID_DELETE_HIVE_CATALOG))) + .addMethod( + getCreateHiveDatabaseMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase>( + service, METHODID_CREATE_HIVE_DATABASE))) + .addMethod( + getGetHiveDatabaseMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase>( + service, METHODID_GET_HIVE_DATABASE))) + .addMethod( + getListHiveDatabasesMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse>( + service, METHODID_LIST_HIVE_DATABASES))) + .addMethod( + getUpdateHiveDatabaseMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest, + com.google.cloud.biglake.hive.v1beta.HiveDatabase>( + service, METHODID_UPDATE_HIVE_DATABASE))) + .addMethod( + getDeleteHiveDatabaseMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest, + com.google.protobuf.Empty>(service, METHODID_DELETE_HIVE_DATABASE))) + .addMethod( + getCreateHiveTableMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable>( + service, METHODID_CREATE_HIVE_TABLE))) + .addMethod( + getGetHiveTableMethod(), 
+ io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable>( + service, METHODID_GET_HIVE_TABLE))) + .addMethod( + getListHiveTablesMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest, + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse>( + service, METHODID_LIST_HIVE_TABLES))) + .addMethod( + getUpdateHiveTableMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest, + com.google.cloud.biglake.hive.v1beta.HiveTable>( + service, METHODID_UPDATE_HIVE_TABLE))) + .addMethod( + getDeleteHiveTableMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest, + com.google.protobuf.Empty>(service, METHODID_DELETE_HIVE_TABLE))) + .addMethod( + getBatchCreatePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest, + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse>( + service, METHODID_BATCH_CREATE_PARTITIONS))) + .addMethod( + getBatchDeletePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest, + com.google.protobuf.Empty>(service, METHODID_BATCH_DELETE_PARTITIONS))) + .addMethod( + getBatchUpdatePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest, + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse>( + service, METHODID_BATCH_UPDATE_PARTITIONS))) + .addMethod( + getListPartitionsMethod(), + io.grpc.stub.ServerCalls.asyncServerStreamingCall( + new MethodHandlers< + 
com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest, + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse>( + service, METHODID_LIST_PARTITIONS))) + .build(); + } + + private abstract static class HiveMetastoreServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + HiveMetastoreServiceBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("HiveMetastoreService"); + } + } + + private static final class HiveMetastoreServiceFileDescriptorSupplier + extends HiveMetastoreServiceBaseDescriptorSupplier { + HiveMetastoreServiceFileDescriptorSupplier() {} + } + + private static final class HiveMetastoreServiceMethodDescriptorSupplier + extends HiveMetastoreServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + HiveMetastoreServiceMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (HiveMetastoreServiceGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new HiveMetastoreServiceFileDescriptorSupplier()) + 
.addMethod(getCreateHiveCatalogMethod()) + .addMethod(getGetHiveCatalogMethod()) + .addMethod(getListHiveCatalogsMethod()) + .addMethod(getUpdateHiveCatalogMethod()) + .addMethod(getDeleteHiveCatalogMethod()) + .addMethod(getCreateHiveDatabaseMethod()) + .addMethod(getGetHiveDatabaseMethod()) + .addMethod(getListHiveDatabasesMethod()) + .addMethod(getUpdateHiveDatabaseMethod()) + .addMethod(getDeleteHiveDatabaseMethod()) + .addMethod(getCreateHiveTableMethod()) + .addMethod(getGetHiveTableMethod()) + .addMethod(getListHiveTablesMethod()) + .addMethod(getUpdateHiveTableMethod()) + .addMethod(getDeleteHiveTableMethod()) + .addMethod(getBatchCreatePartitionsMethod()) + .addMethod(getBatchDeletePartitionsMethod()) + .addMethod(getBatchUpdatePartitionsMethod()) + .addMethod(getListPartitionsMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-biglake/pom.xml b/java-biglake/pom.xml index daf822d279d9..3c7c2666654c 100644 --- a/java-biglake/pom.xml +++ b/java-biglake/pom.xml @@ -31,6 +31,16 @@ google-cloud-biglake 0.76.0-SNAPSHOT + + com.google.api.grpc + proto-google-cloud-biglake-v1beta + 0.76.0-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-biglake-v1beta + 0.76.0-SNAPSHOT + com.google.api.grpc proto-google-cloud-biglake-v1 @@ -58,8 +68,10 @@ google-cloud-biglake grpc-google-cloud-biglake-v1alpha1 grpc-google-cloud-biglake-v1 + grpc-google-cloud-biglake-v1beta proto-google-cloud-biglake-v1alpha1 proto-google-cloud-biglake-v1 + proto-google-cloud-biglake-v1beta google-cloud-biglake-bom diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/clirr-ignored-differences.xml b/java-biglake/proto-google-cloud-biglake-v1beta/clirr-ignored-differences.xml new file mode 100644 index 000000000000..dc5e4275c55a --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/clirr-ignored-differences.xml @@ -0,0 +1,80 @@ + + + + + 7012 + com/google/cloud/biglake/hive/v1beta/*OrBuilder + * get*(*) + + + 7012 + 
com/google/cloud/biglake/hive/v1beta/*OrBuilder + boolean contains*(*) + + + 7012 + com/google/cloud/biglake/hive/v1beta/*OrBuilder + boolean has*(*) + + + + 7006 + com/google/cloud/biglake/hive/v1beta/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/cloud/biglake/hive/v1beta/** + * addRepeatedField(*) + ** + + + 7006 + com/google/cloud/biglake/hive/v1beta/** + * clear() + ** + + + 7006 + com/google/cloud/biglake/hive/v1beta/** + * clearField(*) + ** + + + 7006 + com/google/cloud/biglake/hive/v1beta/** + * clearOneof(*) + ** + + + 7006 + com/google/cloud/biglake/hive/v1beta/** + * clone() + ** + + + 7006 + com/google/cloud/biglake/hive/v1beta/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/cloud/biglake/hive/v1beta/** + * setField(*) + ** + + + 7006 + com/google/cloud/biglake/hive/v1beta/** + * setRepeatedField(*) + ** + + + 7006 + com/google/cloud/biglake/hive/v1beta/** + * setUnknownFields(*) + ** + + diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/pom.xml b/java-biglake/proto-google-cloud-biglake-v1beta/pom.xml new file mode 100644 index 000000000000..38359c335e1e --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/pom.xml @@ -0,0 +1,37 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-biglake-v1beta + 0.76.0-SNAPSHOT + proto-google-cloud-biglake-v1beta + Proto library for google-cloud-biglake + + com.google.cloud + google-cloud-biglake-parent + 0.76.0-SNAPSHOT + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api.grpc + proto-google-iam-v1 + + + com.google.api + api-common + + + com.google.guava + guava + + + diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsRequest.java new file mode 100644 index 000000000000..7352538d0306 --- 
/dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsRequest.java @@ -0,0 +1,1329 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the BatchCreatePartitions method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest} + */ +@com.google.protobuf.Generated +public final class BatchCreatePartitionsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest) + BatchCreatePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BatchCreatePartitionsRequest"); + } + + // Use BatchCreatePartitionsRequest.newBuilder() to construct. + private BatchCreatePartitionsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BatchCreatePartitionsRequest() { + parent_ = ""; + requests_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest.class, + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to where the partitions to be added, in
+   * the format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to where the partitions to be added, in
+   * the format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUESTS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List requests_; + + /** + * + * + *
+   * Required. Requests to add partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getRequestsList() { + return requests_; + } + + /** + * + * + *
+   * Required. Requests to add partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.biglake.hive.v1beta.CreatePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + return requests_; + } + + /** + * + * + *
+   * Required. Requests to add partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getRequestsCount() { + return requests_.size(); + } + + /** + * + * + *
+   * Required. Requests to add partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest getRequests(int index) { + return requests_.get(index); + } + + /** + * + * + *
+   * Required. Requests to add partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreatePartitionRequestOrBuilder getRequestsOrBuilder( + int index) { + return requests_.get(index); + } + + public static final int SKIP_EXISTING_PARTITIONS_FIELD_NUMBER = 3; + private boolean skipExistingPartitions_ = false; + + /** + * + * + *
+   * Optional. Corresponds to the `ifNotExists` flag in the Hive Metastore APIs.
+   * If the flag is set to false, the server will return ALREADY_EXISTS if any
+   * partition already exists. If the flag is set to true, the server will skip
+   * existing partitions and insert only the non-existing partitions.
+   * A maximum of 900 partitions can be inserted in a batch.
+   * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + @java.lang.Override + public boolean getSkipExistingPartitions() { + return skipExistingPartitions_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + output.writeMessage(2, requests_.get(i)); + } + if (skipExistingPartitions_ != false) { + output.writeBool(3, skipExistingPartitions_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, requests_.get(i)); + } + if (skipExistingPartitions_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, skipExistingPartitions_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest other = + 
(com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getRequestsList().equals(other.getRequestsList())) return false; + if (getSkipExistingPartitions() != other.getSkipExistingPartitions()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getRequestsCount() > 0) { + hash = (37 * hash) + REQUESTS_FIELD_NUMBER; + hash = (53 * hash) + getRequestsList().hashCode(); + } + hash = (37 * hash) + SKIP_EXISTING_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSkipExistingPartitions()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
+ throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the BatchCreatePartitions method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest) + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest.class, + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest.Builder.class); + } + + // Construct using + // com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + } else { + requests_ = null; + requestsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + skipExistingPartitions_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsRequest_descriptor; + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest build() { + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest result = + new com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest result) { + if (requestsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + requests_ = java.util.Collections.unmodifiableList(requests_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.requests_ = requests_; + } else { + result.requests_ = requestsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.skipExistingPartitions_ = skipExistingPartitions_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + 
public Builder mergeFrom( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest other) { + if (other + == com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (requestsBuilder_ == null) { + if (!other.requests_.isEmpty()) { + if (requests_.isEmpty()) { + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRequestsIsMutable(); + requests_.addAll(other.requests_); + } + onChanged(); + } + } else { + if (!other.requests_.isEmpty()) { + if (requestsBuilder_.isEmpty()) { + requestsBuilder_.dispose(); + requestsBuilder_ = null; + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + requestsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetRequestsFieldBuilder() + : null; + } else { + requestsBuilder_.addAllMessages(other.requests_); + } + } + } + if (other.getSkipExistingPartitions() != false) { + setSkipExistingPartitions(other.getSkipExistingPartitions()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest m = + input.readMessage( + 
com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.parser(), + extensionRegistry); + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(m); + } else { + requestsBuilder_.addMessage(m); + } + break; + } // case 18 + case 24: + { + skipExistingPartitions_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table where the partitions are to be added, in
+     * the format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table where the partitions are to be added, in
+     * the format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table where the partitions are to be added, in
+     * the format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table where the partitions are to be added, in
+     * the format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table where the partitions are to be added, in
+     * the format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List requests_ = + java.util.Collections.emptyList(); + + private void ensureRequestsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + requests_ = + new java.util.ArrayList( + requests_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest, + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.Builder, + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequestOrBuilder> + requestsBuilder_; + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getRequestsList() { + if (requestsBuilder_ == null) { + return java.util.Collections.unmodifiableList(requests_); + } else { + return requestsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getRequestsCount() { + if (requestsBuilder_ == null) { + return requests_.size(); + } else { + return requestsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest getRequests(int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.set(index, value); + onChanged(); + } else { + requestsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.Builder builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.set(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests(com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(value); + onChanged(); + } else { + requestsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(index, value); + onChanged(); + } else { + requestsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.Builder builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.Builder builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllRequests( + java.lang.Iterable + values) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, requests_); + onChanged(); + } else { + requestsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearRequests() { + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + requestsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeRequests(int index) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.remove(index); + onChanged(); + } else { + requestsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.Builder getRequestsBuilder( + int index) { + return internalGetRequestsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.CreatePartitionRequestOrBuilder + getRequestsOrBuilder(int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends com.google.cloud.biglake.hive.v1beta.CreatePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + if (requestsBuilder_ != null) { + return requestsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(requests_); + } + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.Builder + addRequestsBuilder() { + return internalGetRequestsFieldBuilder() + .addBuilder( + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.Builder addRequestsBuilder( + int index) { + return internalGetRequestsFieldBuilder() + .addBuilder( + index, + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to add partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getRequestsBuilderList() { + return internalGetRequestsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest, + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.Builder, + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequestOrBuilder> + internalGetRequestsFieldBuilder() { + if (requestsBuilder_ == null) { + requestsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest, + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.Builder, + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequestOrBuilder>( + requests_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + requests_ = null; + } + return requestsBuilder_; + } + + private boolean skipExistingPartitions_; + + /** + * + * + *
+     * Optional. Corresponds to the `ifNotExists` flag in the Hive Metastore APIs.
+     * If the flag is set to false, the server will return ALREADY_EXISTS if any
+     * partition already exists. If the flag is set to true, the server will skip
+     * existing partitions and insert only the non-existing partitions.
+     * A maximum of 900 partitions can be inserted in a batch.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + @java.lang.Override + public boolean getSkipExistingPartitions() { + return skipExistingPartitions_; + } + + /** + * + * + *
+     * Optional. Corresponds to the `ifNotExists` flag in the Hive Metastore APIs.
+     * If the flag is set to false, the server will return ALREADY_EXISTS if any
+     * partition already exists. If the flag is set to true, the server will skip
+     * existing partitions and insert only the non-existing partitions.
+     * A maximum of 900 partitions can be inserted in a batch.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The skipExistingPartitions to set. + * @return This builder for chaining. + */ + public Builder setSkipExistingPartitions(boolean value) { + + skipExistingPartitions_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Corresponds to the `ifNotExists` flag in the Hive Metastore APIs.
+     * If the flag is set to false, the server will return ALREADY_EXISTS if any
+     * partition already exists. If the flag is set to true, the server will skip
+     * existing partitions and insert only the non-existing partitions.
+     * A maximum of 900 partitions can be inserted in a batch.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearSkipExistingPartitions() { + bitField0_ = (bitField0_ & ~0x00000004); + skipExistingPartitions_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest) + private static final com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCreatePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + 
public com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..60d192b5ffad --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsRequestOrBuilder.java @@ -0,0 +1,146 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface BatchCreatePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table where the partitions are to be added, in
+   * the format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table where the partitions are to be added, in
+   * the format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Requests to add partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getRequestsList(); + + /** + * + * + *
+   * Required. Requests to add partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest getRequests(int index); + + /** + * + * + *
+   * Required. Requests to add partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getRequestsCount(); + + /** + * + * + *
+   * Required. Requests to add partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getRequestsOrBuilderList(); + + /** + * + * + *
+   * Required. Requests to add partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.CreatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequestOrBuilder getRequestsOrBuilder( + int index); + + /** + * + * + *
+   * Optional. Corresponds to the `ifNotExists` flag in the Hive Metastore APIs.
+   * If the flag is set to false, the server will return ALREADY_EXISTS if any
+   * partition already exists. If the flag is set to true, the server will skip
+   * existing partitions and insert only the non-existing partitions.
+   * A maximum of 900 partitions can be inserted in a batch.
+   * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + boolean getSkipExistingPartitions(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsResponse.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsResponse.java new file mode 100644 index 000000000000..49b27ed077ed --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsResponse.java @@ -0,0 +1,937 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Response message for BatchCreatePartitions.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse} + */ +@com.google.protobuf.Generated +public final class BatchCreatePartitionsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse) + BatchCreatePartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BatchCreatePartitionsResponse"); + } + + // Use BatchCreatePartitionsResponse.newBuilder() to construct. + private BatchCreatePartitionsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BatchCreatePartitionsResponse() { + partitions_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse.class, + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse.Builder.class); + } + + public static final int PARTITIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List partitions_; + + /** + * + * + *
+   * The list of partitions that have been added.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + @java.lang.Override + public java.util.List getPartitionsList() { + return partitions_; + } + + /** + * + * + *
+   * The list of partitions that have been added.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + @java.lang.Override + public java.util.List + getPartitionsOrBuilderList() { + return partitions_; + } + + /** + * + * + *
+   * The list of partitions that have been added.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + @java.lang.Override + public int getPartitionsCount() { + return partitions_.size(); + } + + /** + * + * + *
+   * The list of partitions that have been added.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.Partition getPartitions(int index) { + return partitions_.get(index); + } + + /** + * + * + *
+   * The list of partitions that have been added.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionsOrBuilder(int index) { + return partitions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < partitions_.size(); i++) { + output.writeMessage(1, partitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < partitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, partitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse other = + (com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse) obj; + + if (!getPartitionsList().equals(other.getPartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPartitionsCount() > 0) { + hash = (37 * hash) + PARTITIONS_FIELD_NUMBER; + hash = (53 
* hash) + getPartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for BatchCreatePartitions.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse) + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse.class, + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse.Builder.class); + } + + // Construct using + // com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + } else { + partitions_ = null; + partitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsResponse_descriptor; + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse build() { + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse buildPartial() { + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse result = + new com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse result) { + if (partitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + partitions_ = java.util.Collections.unmodifiableList(partitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.partitions_ = partitions_; + } else { + result.partitions_ = partitionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse) { + return mergeFrom( + (com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse other) { + if (other + == 
com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse + .getDefaultInstance()) return this; + if (partitionsBuilder_ == null) { + if (!other.partitions_.isEmpty()) { + if (partitions_.isEmpty()) { + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePartitionsIsMutable(); + partitions_.addAll(other.partitions_); + } + onChanged(); + } + } else { + if (!other.partitions_.isEmpty()) { + if (partitionsBuilder_.isEmpty()) { + partitionsBuilder_.dispose(); + partitionsBuilder_ = null; + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + partitionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetPartitionsFieldBuilder() + : null; + } else { + partitionsBuilder_.addAllMessages(other.partitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.biglake.hive.v1beta.Partition m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.Partition.parser(), extensionRegistry); + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(m); + } else { + partitionsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { 
+ throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List partitions_ = + java.util.Collections.emptyList(); + + private void ensurePartitionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + partitions_ = + new java.util.ArrayList(partitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder> + partitionsBuilder_; + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public java.util.List getPartitionsList() { + if (partitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitions_); + } else { + return partitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public int getPartitionsCount() { + if (partitionsBuilder_ == null) { + return partitions_.size(); + } else { + return partitionsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public com.google.cloud.biglake.hive.v1beta.Partition getPartitions(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder setPartitions(int index, com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.set(index, value); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder setPartitions( + int index, com.google.cloud.biglake.hive.v1beta.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder addPartitions(com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(value); + onChanged(); + } else { + partitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder addPartitions(int index, com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(index, value); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder addPartitions( + com.google.cloud.biglake.hive.v1beta.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder addPartitions( + int index, com.google.cloud.biglake.hive.v1beta.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder addAllPartitions( + java.lang.Iterable values) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitions_); + onChanged(); + } else { + partitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder removePartitions(int index) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.remove(index); + onChanged(); + } else { + partitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public com.google.cloud.biglake.hive.v1beta.Partition.Builder getPartitionsBuilder(int index) { + return internalGetPartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionsOrBuilder( + int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public java.util.List + getPartitionsOrBuilderList() { + if (partitionsBuilder_ != null) { + return partitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitions_); + } + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public com.google.cloud.biglake.hive.v1beta.Partition.Builder addPartitionsBuilder() { + return internalGetPartitionsFieldBuilder() + .addBuilder(com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public com.google.cloud.biglake.hive.v1beta.Partition.Builder addPartitionsBuilder(int index) { + return internalGetPartitionsFieldBuilder() + .addBuilder(index, com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of partitions that have been added.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public java.util.List + getPartitionsBuilderList() { + return internalGetPartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder> + internalGetPartitionsFieldBuilder() { + if (partitionsBuilder_ == null) { + partitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder>( + partitions_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + partitions_ = null; + } + return partitionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse) + private static final com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse(); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCreatePartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsResponseOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsResponseOrBuilder.java new file mode 100644 index 000000000000..d10a3bdb3855 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchCreatePartitionsResponseOrBuilder.java @@ -0,0 +1,84 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface BatchCreatePartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of partitions that have been added.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + java.util.List getPartitionsList(); + + /** + * + * + *
+   * The list of partitions that have been added.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + com.google.cloud.biglake.hive.v1beta.Partition getPartitions(int index); + + /** + * + * + *
+   * The list of partitions that have been added.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + int getPartitionsCount(); + + /** + * + * + *
+   * The list of partitions that have been added.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + java.util.List + getPartitionsOrBuilderList(); + + /** + * + * + *
+   * The list of partitions that have been added.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionsOrBuilder(int index); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchDeletePartitionsRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchDeletePartitionsRequest.java new file mode 100644 index 000000000000..d0d520d41b05 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchDeletePartitionsRequest.java @@ -0,0 +1,1239 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for BatchDeletePartitions. The Partition is
+ * uniquely identified by values, which is an ordered list. Hence, there is no
+ * separate name or partition id field.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest} + */ +@com.google.protobuf.Generated +public final class BatchDeletePartitionsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest) + BatchDeletePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BatchDeletePartitionsRequest"); + } + + // Use BatchDeletePartitionsRequest.newBuilder() to construct. + private BatchDeletePartitionsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BatchDeletePartitionsRequest() { + parent_ = ""; + partitionValues_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchDeletePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchDeletePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest.class, + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARTITION_VALUES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List partitionValues_; + + /** + * + * + *
+   * Required. The list of partitions (identified by its values) to be deleted.
+   * A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getPartitionValuesList() { + return partitionValues_; + } + + /** + * + * + *
+   * Required. The list of partitions (identified by its values) to be deleted.
+   * A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getPartitionValuesOrBuilderList() { + return partitionValues_; + } + + /** + * + * + *
+   * Required. The list of partitions (identified by its values) to be deleted.
+   * A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getPartitionValuesCount() { + return partitionValues_.size(); + } + + /** + * + * + *
+   * Required. The list of partitions (identified by its values) to be deleted.
+   * A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.PartitionValues getPartitionValues(int index) { + return partitionValues_.get(index); + } + + /** + * + * + *
+   * Required. The list of partitions (identified by its values) to be deleted.
+   * A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.PartitionValuesOrBuilder getPartitionValuesOrBuilder( + int index) { + return partitionValues_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + for (int i = 0; i < partitionValues_.size(); i++) { + output.writeMessage(2, partitionValues_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + for (int i = 0; i < partitionValues_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, partitionValues_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest other = + (com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) 
return false; + if (!getPartitionValuesList().equals(other.getPartitionValuesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getPartitionValuesCount() > 0) { + hash = (37 * hash) + PARTITION_VALUES_FIELD_NUMBER; + hash = (53 * hash) + getPartitionValuesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + 
+ public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for BatchDeletePartitions. The Partition is
+   * uniquely identified by values, which is an ordered list. Hence, there is no
+   * separate name or partition id field.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest) + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchDeletePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchDeletePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest.class, + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest.Builder.class); + } + + // Construct using + // com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + if (partitionValuesBuilder_ == null) { + partitionValues_ = java.util.Collections.emptyList(); + } else { + partitionValues_ = null; + partitionValuesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchDeletePartitionsRequest_descriptor; + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest build() { + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest result = + new com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest result) { + if (partitionValuesBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + partitionValues_ = java.util.Collections.unmodifiableList(partitionValues_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.partitionValues_ = partitionValues_; + } else { + result.partitionValues_ = partitionValuesBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + 
com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest other) { + if (other + == com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (partitionValuesBuilder_ == null) { + if (!other.partitionValues_.isEmpty()) { + if (partitionValues_.isEmpty()) { + partitionValues_ = other.partitionValues_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensurePartitionValuesIsMutable(); + partitionValues_.addAll(other.partitionValues_); + } + onChanged(); + } + } else { + if (!other.partitionValues_.isEmpty()) { + if (partitionValuesBuilder_.isEmpty()) { + partitionValuesBuilder_.dispose(); + partitionValuesBuilder_ = null; + partitionValues_ = other.partitionValues_; + bitField0_ = (bitField0_ & ~0x00000002); + partitionValuesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetPartitionValuesFieldBuilder() + : null; + } else { + partitionValuesBuilder_.addAllMessages(other.partitionValues_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.biglake.hive.v1beta.PartitionValues m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.PartitionValues.parser(), + extensionRegistry); + 
if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.add(m); + } else { + partitionValuesBuilder_.addMessage(m); + } + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List partitionValues_ = + java.util.Collections.emptyList(); + + private void ensurePartitionValuesIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + partitionValues_ = + new java.util.ArrayList( + partitionValues_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.PartitionValues, + com.google.cloud.biglake.hive.v1beta.PartitionValues.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionValuesOrBuilder> + partitionValuesBuilder_; + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getPartitionValuesList() { + if (partitionValuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitionValues_); + } else { + return partitionValuesBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getPartitionValuesCount() { + if (partitionValuesBuilder_ == null) { + return partitionValues_.size(); + } else { + return partitionValuesBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.PartitionValues getPartitionValues(int index) { + if (partitionValuesBuilder_ == null) { + return partitionValues_.get(index); + } else { + return partitionValuesBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartitionValues( + int index, com.google.cloud.biglake.hive.v1beta.PartitionValues value) { + if (partitionValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionValuesIsMutable(); + partitionValues_.set(index, value); + onChanged(); + } else { + partitionValuesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartitionValues( + int index, com.google.cloud.biglake.hive.v1beta.PartitionValues.Builder builderForValue) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionValuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues(com.google.cloud.biglake.hive.v1beta.PartitionValues value) { + if (partitionValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionValuesIsMutable(); + partitionValues_.add(value); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues( + int index, com.google.cloud.biglake.hive.v1beta.PartitionValues value) { + if (partitionValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionValuesIsMutable(); + partitionValues_.add(index, value); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues( + com.google.cloud.biglake.hive.v1beta.PartitionValues.Builder builderForValue) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.add(builderForValue.build()); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues( + int index, com.google.cloud.biglake.hive.v1beta.PartitionValues.Builder builderForValue) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllPartitionValues( + java.lang.Iterable values) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitionValues_); + onChanged(); + } else { + partitionValuesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearPartitionValues() { + if (partitionValuesBuilder_ == null) { + partitionValues_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + partitionValuesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removePartitionValues(int index) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.remove(index); + onChanged(); + } else { + partitionValuesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.PartitionValues.Builder getPartitionValuesBuilder( + int index) { + return internalGetPartitionValuesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.PartitionValuesOrBuilder + getPartitionValuesOrBuilder(int index) { + if (partitionValuesBuilder_ == null) { + return partitionValues_.get(index); + } else { + return partitionValuesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getPartitionValuesOrBuilderList() { + if (partitionValuesBuilder_ != null) { + return partitionValuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitionValues_); + } + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.PartitionValues.Builder + addPartitionValuesBuilder() { + return internalGetPartitionValuesFieldBuilder() + .addBuilder(com.google.cloud.biglake.hive.v1beta.PartitionValues.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.PartitionValues.Builder addPartitionValuesBuilder( + int index) { + return internalGetPartitionValuesFieldBuilder() + .addBuilder( + index, com.google.cloud.biglake.hive.v1beta.PartitionValues.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. The list of partitions (identified by its values) to be deleted.
+     * A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getPartitionValuesBuilderList() { + return internalGetPartitionValuesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.PartitionValues, + com.google.cloud.biglake.hive.v1beta.PartitionValues.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionValuesOrBuilder> + internalGetPartitionValuesFieldBuilder() { + if (partitionValuesBuilder_ == null) { + partitionValuesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.PartitionValues, + com.google.cloud.biglake.hive.v1beta.PartitionValues.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionValuesOrBuilder>( + partitionValues_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + partitionValues_ = null; + } + return partitionValuesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest) + private static final com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchDeletePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + 
try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchDeletePartitionsRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchDeletePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..8cd3d980c0e0 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchDeletePartitionsRequestOrBuilder.java @@ -0,0 +1,134 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface BatchDeletePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The list of partitions (identified by its values) to be deleted.
+   * A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getPartitionValuesList(); + + /** + * + * + *
+   * Required. The list of partitions (identified by its values) to be deleted.
+   * A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.PartitionValues getPartitionValues(int index); + + /** + * + * + *
+   * Required. The list of partitions (identified by its values) to be deleted.
+   * A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getPartitionValuesCount(); + + /** + * + * + *
+   * Required. The list of partitions (identified by its values) to be deleted.
+   * A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getPartitionValuesOrBuilderList(); + + /** + * + * + *
+   * Required. The list of partitions (identified by its values) to be deleted.
+   * A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.PartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.PartitionValuesOrBuilder getPartitionValuesOrBuilder( + int index); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsRequest.java new file mode 100644 index 000000000000..70e217282bab --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsRequest.java @@ -0,0 +1,1216 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for BatchUpdatePartitions.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest} + */ +@com.google.protobuf.Generated +public final class BatchUpdatePartitionsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest) + BatchUpdatePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BatchUpdatePartitionsRequest"); + } + + // Use BatchUpdatePartitionsRequest.newBuilder() to construct. + private BatchUpdatePartitionsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BatchUpdatePartitionsRequest() { + parent_ = ""; + requests_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest.class, + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUESTS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List requests_; + + /** + * + * + *
+   * Required. Requests to update partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getRequestsList() { + return requests_; + } + + /** + * + * + *
+   * Required. Requests to update partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + return requests_; + } + + /** + * + * + *
+   * Required. Requests to update partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getRequestsCount() { + return requests_.size(); + } + + /** + * + * + *
+   * Required. Requests to update partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest getRequests(int index) { + return requests_.get(index); + } + + /** + * + * + *
+   * Required. Requests to update partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequestOrBuilder getRequestsOrBuilder( + int index) { + return requests_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + output.writeMessage(2, requests_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, requests_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest other = + (com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if 
(!getRequestsList().equals(other.getRequestsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getRequestsCount() > 0) { + hash = (37 * hash) + REQUESTS_FIELD_NUMBER; + hash = (53 * hash) + getRequestsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + 
PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for BatchUpdatePartitions.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest) + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest.class, + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest.Builder.class); + } + + // Construct using + // com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + } else { + requests_ = null; + requestsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsRequest_descriptor; + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest build() { + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest result = + new com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest result) { + if (requestsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + requests_ = java.util.Collections.unmodifiableList(requests_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.requests_ = requests_; + } else { + result.requests_ = requestsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest other) { + if (other 
+ == com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (requestsBuilder_ == null) { + if (!other.requests_.isEmpty()) { + if (requests_.isEmpty()) { + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRequestsIsMutable(); + requests_.addAll(other.requests_); + } + onChanged(); + } + } else { + if (!other.requests_.isEmpty()) { + if (requestsBuilder_.isEmpty()) { + requestsBuilder_.dispose(); + requestsBuilder_ = null; + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + requestsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetRequestsFieldBuilder() + : null; + } else { + requestsBuilder_.addAllMessages(other.requests_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.parser(), + extensionRegistry); + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(m); + } else { + requestsBuilder_.addMessage(m); + } + break; + } // case 18 + default: + { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List requests_ = + java.util.Collections.emptyList(); + + private void ensureRequestsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + requests_ = + new java.util.ArrayList( + requests_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest, + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.Builder, + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequestOrBuilder> + requestsBuilder_; + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getRequestsList() { + if (requestsBuilder_ == null) { + return java.util.Collections.unmodifiableList(requests_); + } else { + return requestsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getRequestsCount() { + if (requestsBuilder_ == null) { + return requests_.size(); + } else { + return requestsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest getRequests(int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.set(index, value); + onChanged(); + } else { + requestsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.Builder builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.set(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests(com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(value); + onChanged(); + } else { + requestsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(index, value); + onChanged(); + } else { + requestsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.Builder builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.Builder builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllRequests( + java.lang.Iterable + values) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, requests_); + onChanged(); + } else { + requestsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearRequests() { + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + requestsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeRequests(int index) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.remove(index); + onChanged(); + } else { + requestsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.Builder getRequestsBuilder( + int index) { + return internalGetRequestsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequestOrBuilder + getRequestsOrBuilder(int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + if (requestsBuilder_ != null) { + return requestsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(requests_); + } + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.Builder + addRequestsBuilder() { + return internalGetRequestsFieldBuilder() + .addBuilder( + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.Builder addRequestsBuilder( + int index) { + return internalGetRequestsFieldBuilder() + .addBuilder( + index, + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to update partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getRequestsBuilderList() { + return internalGetRequestsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest, + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.Builder, + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequestOrBuilder> + internalGetRequestsFieldBuilder() { + if (requestsBuilder_ == null) { + requestsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest, + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.Builder, + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequestOrBuilder>( + requests_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + requests_ = null; + } + return requestsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest) + private static final com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchUpdatePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + 
builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..2ba00f189fdb --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsRequestOrBuilder.java @@ -0,0 +1,129 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface BatchUpdatePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Requests to update partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getRequestsList(); + + /** + * + * + *
+   * Required. Requests to update partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest getRequests(int index); + + /** + * + * + *
+   * Required. Requests to update partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getRequestsCount(); + + /** + * + * + *
+   * Required. Requests to update partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getRequestsOrBuilderList(); + + /** + * + * + *
+   * Required. Requests to update partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.UpdatePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequestOrBuilder getRequestsOrBuilder( + int index); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsResponse.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsResponse.java new file mode 100644 index 000000000000..065519003aff --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsResponse.java @@ -0,0 +1,960 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Response message for BatchUpdatePartitions.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse} + */ +@com.google.protobuf.Generated +public final class BatchUpdatePartitionsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse) + BatchUpdatePartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BatchUpdatePartitionsResponse"); + } + + // Use BatchUpdatePartitionsResponse.newBuilder() to construct. + private BatchUpdatePartitionsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BatchUpdatePartitionsResponse() { + partitions_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse.class, + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse.Builder.class); + } + + public static final int PARTITIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List partitions_; + + /** + * + * + *
+   * The list of partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + @java.lang.Override + public java.util.List getPartitionsList() { + return partitions_; + } + + /** + * + * + *
+   * The list of partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + @java.lang.Override + public java.util.List + getPartitionsOrBuilderList() { + return partitions_; + } + + /** + * + * + *
+   * The list of partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + @java.lang.Override + public int getPartitionsCount() { + return partitions_.size(); + } + + /** + * + * + *
+   * The list of partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.Partition getPartitions(int index) { + return partitions_.get(index); + } + + /** + * + * + *
+   * The list of partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionsOrBuilder(int index) { + return partitions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < partitions_.size(); i++) { + output.writeMessage(1, partitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < partitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, partitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse other = + (com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse) obj; + + if (!getPartitionsList().equals(other.getPartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPartitionsCount() > 0) { + hash = (37 * hash) + PARTITIONS_FIELD_NUMBER; + hash = (53 
* hash) + getPartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for BatchUpdatePartitions.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse) + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse.class, + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse.Builder.class); + } + + // Construct using + // com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + } else { + partitions_ = null; + partitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsResponse_descriptor; + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse build() { + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse buildPartial() { + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse result = + new com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse result) { + if (partitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + partitions_ = java.util.Collections.unmodifiableList(partitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.partitions_ = partitions_; + } else { + result.partitions_ = partitionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse) { + return mergeFrom( + (com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse other) { + if (other + == 
com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse + .getDefaultInstance()) return this; + if (partitionsBuilder_ == null) { + if (!other.partitions_.isEmpty()) { + if (partitions_.isEmpty()) { + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePartitionsIsMutable(); + partitions_.addAll(other.partitions_); + } + onChanged(); + } + } else { + if (!other.partitions_.isEmpty()) { + if (partitionsBuilder_.isEmpty()) { + partitionsBuilder_.dispose(); + partitionsBuilder_ = null; + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + partitionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetPartitionsFieldBuilder() + : null; + } else { + partitionsBuilder_.addAllMessages(other.partitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.biglake.hive.v1beta.Partition m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.Partition.parser(), extensionRegistry); + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(m); + } else { + partitionsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { 
+ throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List partitions_ = + java.util.Collections.emptyList(); + + private void ensurePartitionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + partitions_ = + new java.util.ArrayList(partitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder> + partitionsBuilder_; + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public java.util.List getPartitionsList() { + if (partitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitions_); + } else { + return partitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public int getPartitionsCount() { + if (partitionsBuilder_ == null) { + return partitions_.size(); + } else { + return partitionsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public com.google.cloud.biglake.hive.v1beta.Partition getPartitions(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder setPartitions(int index, com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.set(index, value); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder setPartitions( + int index, com.google.cloud.biglake.hive.v1beta.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder addPartitions(com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(value); + onChanged(); + } else { + partitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder addPartitions(int index, com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(index, value); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder addPartitions( + com.google.cloud.biglake.hive.v1beta.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder addPartitions( + int index, com.google.cloud.biglake.hive.v1beta.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder addAllPartitions( + java.lang.Iterable values) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitions_); + onChanged(); + } else { + partitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public Builder removePartitions(int index) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.remove(index); + onChanged(); + } else { + partitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public com.google.cloud.biglake.hive.v1beta.Partition.Builder getPartitionsBuilder(int index) { + return internalGetPartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionsOrBuilder( + int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public java.util.List + getPartitionsOrBuilderList() { + if (partitionsBuilder_ != null) { + return partitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitions_); + } + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public com.google.cloud.biglake.hive.v1beta.Partition.Builder addPartitionsBuilder() { + return internalGetPartitionsFieldBuilder() + .addBuilder(com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public com.google.cloud.biglake.hive.v1beta.Partition.Builder addPartitionsBuilder(int index) { + return internalGetPartitionsFieldBuilder() + .addBuilder(index, com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + public java.util.List + getPartitionsBuilderList() { + return internalGetPartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder> + internalGetPartitionsFieldBuilder() { + if (partitionsBuilder_ == null) { + partitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder>( + partitions_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + partitions_ = null; + } + return partitionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse) + private static final com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse(); + } + + public static com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchUpdatePartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsResponseOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsResponseOrBuilder.java new file mode 100644 index 000000000000..3b03759976d0 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/BatchUpdatePartitionsResponseOrBuilder.java @@ -0,0 +1,89 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface BatchUpdatePartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + java.util.List getPartitionsList(); + + /** + * + * + *
+   * The list of partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + com.google.cloud.biglake.hive.v1beta.Partition getPartitions(int index); + + /** + * + * + *
+   * The list of partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + int getPartitionsCount(); + + /** + * + * + *
+   * The list of partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + java.util.List + getPartitionsOrBuilderList(); + + /** + * + * + *
+   * The list of partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1; + */ + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionsOrBuilder(int index); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CatalogName.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CatalogName.java new file mode 100644 index 000000000000..6ac07fdd219f --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CatalogName.java @@ -0,0 +1,192 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class CatalogName implements ResourceName { + private static final PathTemplate PROJECT_CATALOG = + PathTemplate.createWithoutUrlEncoding("projects/{project}/catalogs/{catalog}"); + private volatile Map fieldValuesMap; + private final String project; + private final String catalog; + + @Deprecated + protected CatalogName() { + project = null; + catalog = null; + } + + private CatalogName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + catalog = Preconditions.checkNotNull(builder.getCatalog()); + } + + public String getProject() { + return project; + } + + public String getCatalog() { + return catalog; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static CatalogName of(String project, String catalog) { + return newBuilder().setProject(project).setCatalog(catalog).build(); + } + + public static String format(String project, String catalog) { + return newBuilder().setProject(project).setCatalog(catalog).build().toString(); + } + + public static CatalogName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_CATALOG.validatedMatch( + formattedString, "CatalogName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("catalog")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (CatalogName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return 
PROJECT_CATALOG.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (catalog != null) { + fieldMapBuilder.put("catalog", catalog); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_CATALOG.instantiate("project", project, "catalog", catalog); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + CatalogName that = ((CatalogName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.catalog, that.catalog); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(catalog); + return h; + } + + /** Builder for projects/{project}/catalogs/{catalog}. 
*/ + public static class Builder { + private String project; + private String catalog; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getCatalog() { + return catalog; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setCatalog(String catalog) { + this.catalog = catalog; + return this; + } + + private Builder(CatalogName catalogName) { + this.project = catalogName.project; + this.catalog = catalogName.catalog; + } + + public CatalogName build() { + return new CatalogName(this); + } + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveCatalogRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveCatalogRequest.java new file mode 100644 index 000000000000..3748a70aa909 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveCatalogRequest.java @@ -0,0 +1,1378 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the CreateHiveCatalog method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest} + */ +@com.google.protobuf.Generated +public final class CreateHiveCatalogRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest) + CreateHiveCatalogRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateHiveCatalogRequest"); + } + + // Use CreateHiveCatalogRequest.newBuilder() to construct. + private CreateHiveCatalogRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateHiveCatalogRequest() { + parent_ = ""; + hiveCatalogId_ = ""; + primaryLocation_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveCatalogRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveCatalogRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest.class, + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The parent resource where this catalog will be created.
+   * Format: projects/{project_id_or_number}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The parent resource where this catalog will be created.
+   * Format: projects/{project_id_or_number}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int HIVE_CATALOG_FIELD_NUMBER = 2; + private com.google.cloud.biglake.hive.v1beta.HiveCatalog hiveCatalog_; + + /** + * + * + *
+   * Required. The catalog to create.
+   * The `name` field does not need to be provided. Gets copied over from
+   * catalog_id.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveCatalog field is set. + */ + @java.lang.Override + public boolean hasHiveCatalog() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The catalog to create.
+   * The `name` field does not need to be provided. Gets copied over from
+   * catalog_id.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveCatalog. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog getHiveCatalog() { + return hiveCatalog_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance() + : hiveCatalog_; + } + + /** + * + * + *
+   * Required. The catalog to create.
+   * The `name` field does not need to be provided. Gets copied over from
+   * catalog_id.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder getHiveCatalogOrBuilder() { + return hiveCatalog_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance() + : hiveCatalog_; + } + + public static final int HIVE_CATALOG_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object hiveCatalogId_ = ""; + + /** + * + * + *
+   * Required. The Hive Catalog ID to use for the catalog that will become the
+   * final component of the catalog's resource name. The maximum length is 256
+   * characters.
+   * 
+ * + * string hive_catalog_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The hiveCatalogId. + */ + @java.lang.Override + public java.lang.String getHiveCatalogId() { + java.lang.Object ref = hiveCatalogId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + hiveCatalogId_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The Hive Catalog ID to use for the catalog that will become the
+   * final component of the catalog's resource name. The maximum length is 256
+   * characters.
+   * 
+ * + * string hive_catalog_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for hiveCatalogId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getHiveCatalogIdBytes() { + java.lang.Object ref = hiveCatalogId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + hiveCatalogId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PRIMARY_LOCATION_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object primaryLocation_ = ""; + + /** + * + * + *
+   * Required. The GCP region that specifies where the catalog metadata is
+   * stored, e.g. us-central1, EU, etc.
+   * 
+ * + * + * string primary_location = 4 [json_name = "primary_location", (.google.api.field_behavior) = REQUIRED]; + * + * + * @return The primaryLocation. + */ + @java.lang.Override + public java.lang.String getPrimaryLocation() { + java.lang.Object ref = primaryLocation_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + primaryLocation_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The GCP region that specifies where the catalog metadata is
+   * stored, e.g. us-central1, EU, etc.
+   * 
+ * + * + * string primary_location = 4 [json_name = "primary_location", (.google.api.field_behavior) = REQUIRED]; + * + * + * @return The bytes for primaryLocation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPrimaryLocationBytes() { + java.lang.Object ref = primaryLocation_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + primaryLocation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getHiveCatalog()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(hiveCatalogId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, hiveCatalogId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(primaryLocation_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, primaryLocation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getHiveCatalog()); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(hiveCatalogId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, hiveCatalogId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(primaryLocation_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, primaryLocation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest other = + (com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasHiveCatalog() != other.hasHiveCatalog()) return false; + if (hasHiveCatalog()) { + if (!getHiveCatalog().equals(other.getHiveCatalog())) return false; + } + if (!getHiveCatalogId().equals(other.getHiveCatalogId())) return false; + if (!getPrimaryLocation().equals(other.getPrimaryLocation())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasHiveCatalog()) { + hash = (37 * hash) + HIVE_CATALOG_FIELD_NUMBER; + hash = (53 * hash) + getHiveCatalog().hashCode(); + } + hash = (37 * hash) + HIVE_CATALOG_ID_FIELD_NUMBER; + hash = (53 * hash) + getHiveCatalogId().hashCode(); + hash = (37 * hash) + PRIMARY_LOCATION_FIELD_NUMBER; + hash = (53 * hash) + getPrimaryLocation().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + 
public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the CreateHiveCatalog method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest) + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveCatalogRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveCatalogRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest.class, + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetHiveCatalogFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + hiveCatalog_ = null; + if (hiveCatalogBuilder_ != null) { + hiveCatalogBuilder_.dispose(); + hiveCatalogBuilder_ = null; + } + hiveCatalogId_ = ""; + primaryLocation_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveCatalogRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest build() { + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest result = + new com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.hiveCatalog_ = + hiveCatalogBuilder_ == null ? 
hiveCatalog_ : hiveCatalogBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.hiveCatalogId_ = hiveCatalogId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.primaryLocation_ = primaryLocation_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest other) { + if (other + == com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasHiveCatalog()) { + mergeHiveCatalog(other.getHiveCatalog()); + } + if (!other.getHiveCatalogId().isEmpty()) { + hiveCatalogId_ = other.hiveCatalogId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getPrimaryLocation().isEmpty()) { + primaryLocation_ = other.primaryLocation_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // 
case 10 + case 18: + { + input.readMessage( + internalGetHiveCatalogFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + hiveCatalogId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + primaryLocation_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The parent resource where this catalog will be created.
+     * Format: projects/{project_id_or_number}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The parent resource where this catalog will be created.
+     * Format: projects/{project_id_or_number}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The parent resource where this catalog will be created.
+     * Format: projects/{project_id_or_number}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The parent resource where this catalog will be created.
+     * Format: projects/{project_id_or_number}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The parent resource where this catalog will be created.
+     * Format: projects/{project_id_or_number}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.cloud.biglake.hive.v1beta.HiveCatalog hiveCatalog_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder> + hiveCatalogBuilder_; + + /** + * + * + *
+     * Required. The catalog to create.
+     * The `name` field does not need to be provided. Gets copied over from
+     * catalog_id.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveCatalog field is set. + */ + public boolean hasHiveCatalog() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. The catalog to create.
+     * The `name` field does not need to be provided. Gets copied over from
+     * catalog_id.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveCatalog. + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog getHiveCatalog() { + if (hiveCatalogBuilder_ == null) { + return hiveCatalog_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance() + : hiveCatalog_; + } else { + return hiveCatalogBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The catalog to create.
+     * The `name` field does not need to be provided. Gets copied over from
+     * catalog_id.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveCatalog(com.google.cloud.biglake.hive.v1beta.HiveCatalog value) { + if (hiveCatalogBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hiveCatalog_ = value; + } else { + hiveCatalogBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The catalog to create.
+     * The `name` field does not need to be provided. Gets copied over from
+     * catalog_id.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveCatalog( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder builderForValue) { + if (hiveCatalogBuilder_ == null) { + hiveCatalog_ = builderForValue.build(); + } else { + hiveCatalogBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The catalog to create.
+     * The `name` field does not need to be provided. Gets copied over from
+     * catalog_id.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeHiveCatalog(com.google.cloud.biglake.hive.v1beta.HiveCatalog value) { + if (hiveCatalogBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && hiveCatalog_ != null + && hiveCatalog_ + != com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance()) { + getHiveCatalogBuilder().mergeFrom(value); + } else { + hiveCatalog_ = value; + } + } else { + hiveCatalogBuilder_.mergeFrom(value); + } + if (hiveCatalog_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The catalog to create.
+     * The `name` field does not need to be provided. Gets copied over from
+     * catalog_id.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearHiveCatalog() { + bitField0_ = (bitField0_ & ~0x00000002); + hiveCatalog_ = null; + if (hiveCatalogBuilder_ != null) { + hiveCatalogBuilder_.dispose(); + hiveCatalogBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The catalog to create.
+     * The `name` field does not need to be provided. Gets copied over from
+     * catalog_id.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder getHiveCatalogBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetHiveCatalogFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The catalog to create.
+     * The `name` field does not need to be provided. Gets copied over from
+     * catalog_id.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder getHiveCatalogOrBuilder() { + if (hiveCatalogBuilder_ != null) { + return hiveCatalogBuilder_.getMessageOrBuilder(); + } else { + return hiveCatalog_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance() + : hiveCatalog_; + } + } + + /** + * + * + *
+     * Required. The catalog to create.
+     * The `name` field does not need to be provided. Gets copied over from
+     * catalog_id.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder> + internalGetHiveCatalogFieldBuilder() { + if (hiveCatalogBuilder_ == null) { + hiveCatalogBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder>( + getHiveCatalog(), getParentForChildren(), isClean()); + hiveCatalog_ = null; + } + return hiveCatalogBuilder_; + } + + private java.lang.Object hiveCatalogId_ = ""; + + /** + * + * + *
+     * Required. The Hive Catalog ID to use for the catalog that will become the
+     * final component of the catalog's resource name. The maximum length is 256
+     * characters.
+     * 
+ * + * string hive_catalog_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The hiveCatalogId. + */ + public java.lang.String getHiveCatalogId() { + java.lang.Object ref = hiveCatalogId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + hiveCatalogId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The Hive Catalog ID to use for the catalog that will become the
+     * final component of the catalog's resource name. The maximum length is 256
+     * characters.
+     * 
+ * + * string hive_catalog_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for hiveCatalogId. + */ + public com.google.protobuf.ByteString getHiveCatalogIdBytes() { + java.lang.Object ref = hiveCatalogId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + hiveCatalogId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The Hive Catalog ID to use for the catalog that will become the
+     * final component of the catalog's resource name. The maximum length is 256
+     * characters.
+     * 
+ * + * string hive_catalog_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The hiveCatalogId to set. + * @return This builder for chaining. + */ + public Builder setHiveCatalogId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + hiveCatalogId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Hive Catalog ID to use for the catalog that will become the
+     * final component of the catalog's resource name. The maximum length is 256
+     * characters.
+     * 
+ * + * string hive_catalog_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearHiveCatalogId() { + hiveCatalogId_ = getDefaultInstance().getHiveCatalogId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Hive Catalog ID to use for the catalog that will become the
+     * final component of the catalog's resource name. The maximum length is 256
+     * characters.
+     * 
+ * + * string hive_catalog_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for hiveCatalogId to set. + * @return This builder for chaining. + */ + public Builder setHiveCatalogIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + hiveCatalogId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object primaryLocation_ = ""; + + /** + * + * + *
+     * Required. The GCP region that specifies where the catalog metadata is
+     * stored, e.g. us-central1, EU, etc.
+     * 
+ * + * + * string primary_location = 4 [json_name = "primary_location", (.google.api.field_behavior) = REQUIRED]; + * + * + * @return The primaryLocation. + */ + public java.lang.String getPrimaryLocation() { + java.lang.Object ref = primaryLocation_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + primaryLocation_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The GCP region that specifies where the catalog metadata is
+     * stored, e.g. us-central1, EU, etc.
+     * 
+ * + * + * string primary_location = 4 [json_name = "primary_location", (.google.api.field_behavior) = REQUIRED]; + * + * + * @return The bytes for primaryLocation. + */ + public com.google.protobuf.ByteString getPrimaryLocationBytes() { + java.lang.Object ref = primaryLocation_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + primaryLocation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The GCP region that specifies where the catalog metadata is
+     * stored, e.g. us-central1, EU, etc.
+     * 
+ * + * + * string primary_location = 4 [json_name = "primary_location", (.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The primaryLocation to set. + * @return This builder for chaining. + */ + public Builder setPrimaryLocation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + primaryLocation_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The GCP region that specifies where the catalog metadata is
+     * stored, e.g. us-central1, EU, etc.
+     * 
+ * + * + * string primary_location = 4 [json_name = "primary_location", (.google.api.field_behavior) = REQUIRED]; + * + * + * @return This builder for chaining. + */ + public Builder clearPrimaryLocation() { + primaryLocation_ = getDefaultInstance().getPrimaryLocation(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The GCP region that specifies where the catalog metadata is
+     * stored, e.g. us-central1, EU, etc.
+     * 
+ * + * + * string primary_location = 4 [json_name = "primary_location", (.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The bytes for primaryLocation to set. + * @return This builder for chaining. + */ + public Builder setPrimaryLocationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + primaryLocation_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest) + private static final com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateHiveCatalogRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static 
com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveCatalogRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveCatalogRequestOrBuilder.java new file mode 100644 index 000000000000..8d99c67797b4 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveCatalogRequestOrBuilder.java @@ -0,0 +1,171 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface CreateHiveCatalogRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The parent resource where this catalog will be created.
+   * Format: projects/{project_id_or_number}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The parent resource where this catalog will be created.
+   * Format: projects/{project_id_or_number}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The catalog to create.
+   * The `name` field does not need to be provided. Gets copied over from
+   * catalog_id.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveCatalog field is set. + */ + boolean hasHiveCatalog(); + + /** + * + * + *
+   * Required. The catalog to create.
+   * The `name` field does not need to be provided. Gets copied over from
+   * catalog_id.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveCatalog. + */ + com.google.cloud.biglake.hive.v1beta.HiveCatalog getHiveCatalog(); + + /** + * + * + *
+   * Required. The catalog to create.
+   * The `name` field does not need to be provided. Gets copied over from
+   * catalog_id.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder getHiveCatalogOrBuilder(); + + /** + * + * + *
+   * Required. The Hive Catalog ID to use for the catalog that will become the
+   * final component of the catalog's resource name. The maximum length is 256
+   * characters.
+   * 
+ * + * string hive_catalog_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The hiveCatalogId. + */ + java.lang.String getHiveCatalogId(); + + /** + * + * + *
+   * Required. The Hive Catalog ID to use for the catalog that will become the
+   * final component of the catalog's resource name. The maximum length is 256
+   * characters.
+   * 
+ * + * string hive_catalog_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for hiveCatalogId. + */ + com.google.protobuf.ByteString getHiveCatalogIdBytes(); + + /** + * + * + *
+   * Required. The GCP region that specifies where the catalog metadata is
+   * stored, e.g. us-central1, EU, etc.
+   * 
+ * + * + * string primary_location = 4 [json_name = "primary_location", (.google.api.field_behavior) = REQUIRED]; + * + * + * @return The primaryLocation. + */ + java.lang.String getPrimaryLocation(); + + /** + * + * + *
+   * Required. The GCP region that specifies where the catalog metadata is
+   * stored, e.g. us-central1, EU, etc.
+   * 
+ * + * + * string primary_location = 4 [json_name = "primary_location", (.google.api.field_behavior) = REQUIRED]; + * + * + * @return The bytes for primaryLocation. + */ + com.google.protobuf.ByteString getPrimaryLocationBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveDatabaseRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveDatabaseRequest.java new file mode 100644 index 000000000000..1df0e106b7e0 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveDatabaseRequest.java @@ -0,0 +1,1158 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the CreateHiveDatabase method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest} + */ +@com.google.protobuf.Generated +public final class CreateHiveDatabaseRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest) + CreateHiveDatabaseRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateHiveDatabaseRequest"); + } + + // Use CreateHiveDatabaseRequest.newBuilder() to construct. + private CreateHiveDatabaseRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateHiveDatabaseRequest() { + parent_ = ""; + hiveDatabaseId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest.class, + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The parent resource where this database will be created.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The parent resource where this database will be created.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int HIVE_DATABASE_FIELD_NUMBER = 2; + private com.google.cloud.biglake.hive.v1beta.HiveDatabase hiveDatabase_; + + /** + * + * + *
+   * Required. The database to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveDatabase field is set. + */ + @java.lang.Override + public boolean hasHiveDatabase() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The database to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveDatabase. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveDatabase getHiveDatabase() { + return hiveDatabase_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance() + : hiveDatabase_; + } + + /** + * + * + *
+   * Required. The database to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder getHiveDatabaseOrBuilder() { + return hiveDatabase_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance() + : hiveDatabase_; + } + + public static final int HIVE_DATABASE_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object hiveDatabaseId_ = ""; + + /** + * + * + *
+   * Required. The ID to use for the Hive Database.
+   * The maximum length is 128 characters.
+   * 
+ * + * string hive_database_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The hiveDatabaseId. + */ + @java.lang.Override + public java.lang.String getHiveDatabaseId() { + java.lang.Object ref = hiveDatabaseId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + hiveDatabaseId_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The ID to use for the Hive Database.
+   * The maximum length is 128 characters.
+   * 
+ * + * string hive_database_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for hiveDatabaseId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getHiveDatabaseIdBytes() { + java.lang.Object ref = hiveDatabaseId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + hiveDatabaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getHiveDatabase()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(hiveDatabaseId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, hiveDatabaseId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getHiveDatabase()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(hiveDatabaseId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, hiveDatabaseId_); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest other = + (com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasHiveDatabase() != other.hasHiveDatabase()) return false; + if (hasHiveDatabase()) { + if (!getHiveDatabase().equals(other.getHiveDatabase())) return false; + } + if (!getHiveDatabaseId().equals(other.getHiveDatabaseId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasHiveDatabase()) { + hash = (37 * hash) + HIVE_DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getHiveDatabase().hashCode(); + } + hash = (37 * hash) + HIVE_DATABASE_ID_FIELD_NUMBER; + hash = (53 * hash) + getHiveDatabaseId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the CreateHiveDatabase method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest) + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest.class, + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetHiveDatabaseFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + hiveDatabase_ = null; + if (hiveDatabaseBuilder_ != null) { + hiveDatabaseBuilder_.dispose(); + hiveDatabaseBuilder_ = null; + } + hiveDatabaseId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest build() { + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest result = + new com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.hiveDatabase_ = + hiveDatabaseBuilder_ == null ? 
hiveDatabase_ : hiveDatabaseBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.hiveDatabaseId_ = hiveDatabaseId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest other) { + if (other + == com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasHiveDatabase()) { + mergeHiveDatabase(other.getHiveDatabase()); + } + if (!other.getHiveDatabaseId().isEmpty()) { + hiveDatabaseId_ = other.hiveDatabaseId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetHiveDatabaseFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + hiveDatabaseId_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The parent resource where this database will be created.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The parent resource where this database will be created.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The parent resource where this database will be created.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The parent resource where this database will be created.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The parent resource where this database will be created.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.cloud.biglake.hive.v1beta.HiveDatabase hiveDatabase_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveDatabase, + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder, + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder> + hiveDatabaseBuilder_; + + /** + * + * + *
+     * Required. The database to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveDatabase field is set. + */ + public boolean hasHiveDatabase() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. The database to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveDatabase. + */ + public com.google.cloud.biglake.hive.v1beta.HiveDatabase getHiveDatabase() { + if (hiveDatabaseBuilder_ == null) { + return hiveDatabase_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance() + : hiveDatabase_; + } else { + return hiveDatabaseBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The database to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveDatabase(com.google.cloud.biglake.hive.v1beta.HiveDatabase value) { + if (hiveDatabaseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hiveDatabase_ = value; + } else { + hiveDatabaseBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The database to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveDatabase( + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder builderForValue) { + if (hiveDatabaseBuilder_ == null) { + hiveDatabase_ = builderForValue.build(); + } else { + hiveDatabaseBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The database to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeHiveDatabase(com.google.cloud.biglake.hive.v1beta.HiveDatabase value) { + if (hiveDatabaseBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && hiveDatabase_ != null + && hiveDatabase_ + != com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance()) { + getHiveDatabaseBuilder().mergeFrom(value); + } else { + hiveDatabase_ = value; + } + } else { + hiveDatabaseBuilder_.mergeFrom(value); + } + if (hiveDatabase_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The database to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearHiveDatabase() { + bitField0_ = (bitField0_ & ~0x00000002); + hiveDatabase_ = null; + if (hiveDatabaseBuilder_ != null) { + hiveDatabaseBuilder_.dispose(); + hiveDatabaseBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The database to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder getHiveDatabaseBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetHiveDatabaseFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The database to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder getHiveDatabaseOrBuilder() { + if (hiveDatabaseBuilder_ != null) { + return hiveDatabaseBuilder_.getMessageOrBuilder(); + } else { + return hiveDatabase_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance() + : hiveDatabase_; + } + } + + /** + * + * + *
+     * Required. The database to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveDatabase, + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder, + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder> + internalGetHiveDatabaseFieldBuilder() { + if (hiveDatabaseBuilder_ == null) { + hiveDatabaseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveDatabase, + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder, + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder>( + getHiveDatabase(), getParentForChildren(), isClean()); + hiveDatabase_ = null; + } + return hiveDatabaseBuilder_; + } + + private java.lang.Object hiveDatabaseId_ = ""; + + /** + * + * + *
+     * Required. The ID to use for the Hive Database.
+     * The maximum length is 128 characters.
+     * 
+ * + * string hive_database_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The hiveDatabaseId. + */ + public java.lang.String getHiveDatabaseId() { + java.lang.Object ref = hiveDatabaseId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + hiveDatabaseId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The ID to use for the Hive Database.
+     * The maximum length is 128 characters.
+     * 
+ * + * string hive_database_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for hiveDatabaseId. + */ + public com.google.protobuf.ByteString getHiveDatabaseIdBytes() { + java.lang.Object ref = hiveDatabaseId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + hiveDatabaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The ID to use for the Hive Database.
+     * The maximum length is 128 characters.
+     * 
+ * + * string hive_database_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The hiveDatabaseId to set. + * @return This builder for chaining. + */ + public Builder setHiveDatabaseId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + hiveDatabaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The ID to use for the Hive Database.
+     * The maximum length is 128 characters.
+     * 
+ * + * string hive_database_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearHiveDatabaseId() { + hiveDatabaseId_ = getDefaultInstance().getHiveDatabaseId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The ID to use for the Hive Database.
+     * The maximum length is 128 characters.
+     * 
+ * + * string hive_database_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for hiveDatabaseId to set. + * @return This builder for chaining. + */ + public Builder setHiveDatabaseIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + hiveDatabaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest) + private static final com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateHiveDatabaseRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return 
PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveDatabaseRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveDatabaseRequestOrBuilder.java new file mode 100644 index 000000000000..6e11b929e2d0 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveDatabaseRequestOrBuilder.java @@ -0,0 +1,136 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface CreateHiveDatabaseRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The parent resource where this database will be created.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The parent resource where this database will be created.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The database to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveDatabase field is set. + */ + boolean hasHiveDatabase(); + + /** + * + * + *
+   * Required. The database to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveDatabase. + */ + com.google.cloud.biglake.hive.v1beta.HiveDatabase getHiveDatabase(); + + /** + * + * + *
+   * Required. The database to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder getHiveDatabaseOrBuilder(); + + /** + * + * + *
+   * Required. The ID to use for the Hive Database.
+   * The maximum length is 128 characters.
+   * 
+ * + * string hive_database_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The hiveDatabaseId. + */ + java.lang.String getHiveDatabaseId(); + + /** + * + * + *
+   * Required. The ID to use for the Hive Database.
+   * The maximum length is 128 characters.
+   * 
+ * + * string hive_database_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for hiveDatabaseId. + */ + com.google.protobuf.ByteString getHiveDatabaseIdBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveTableRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveTableRequest.java new file mode 100644 index 000000000000..ab5ed9da574a --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveTableRequest.java @@ -0,0 +1,1157 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the CreateHiveTable method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.CreateHiveTableRequest} + */ +@com.google.protobuf.Generated +public final class CreateHiveTableRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.CreateHiveTableRequest) + CreateHiveTableRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateHiveTableRequest"); + } + + // Use CreateHiveTableRequest.newBuilder() to construct. + private CreateHiveTableRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateHiveTableRequest() { + parent_ = ""; + hiveTableId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveTableRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest.class, + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The parent resource for the table to be created.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The parent resource for the table to be created.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int HIVE_TABLE_FIELD_NUMBER = 2; + private com.google.cloud.biglake.hive.v1beta.HiveTable hiveTable_; + + /** + * + * + *
+   * Required. The Hive Table to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveTable field is set. + */ + @java.lang.Override + public boolean hasHiveTable() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The Hive Table to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveTable. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveTable getHiveTable() { + return hiveTable_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance() + : hiveTable_; + } + + /** + * + * + *
+   * Required. The Hive Table to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder getHiveTableOrBuilder() { + return hiveTable_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance() + : hiveTable_; + } + + public static final int HIVE_TABLE_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object hiveTableId_ = ""; + + /** + * + * + *
+   * Required. The Hive Table ID to use for the table that will become the final
+   * component of the table's resource name. The maximum length is 256
+   * characters.
+   * 
+ * + * string hive_table_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The hiveTableId. + */ + @java.lang.Override + public java.lang.String getHiveTableId() { + java.lang.Object ref = hiveTableId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + hiveTableId_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The Hive Table ID to use for the table that will become the final
+   * component of the table's resource name. The maximum length is 256
+   * characters.
+   * 
+ * + * string hive_table_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for hiveTableId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getHiveTableIdBytes() { + java.lang.Object ref = hiveTableId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + hiveTableId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getHiveTable()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(hiveTableId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, hiveTableId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getHiveTable()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(hiveTableId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, hiveTableId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; 
+ } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest other = + (com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasHiveTable() != other.hasHiveTable()) return false; + if (hasHiveTable()) { + if (!getHiveTable().equals(other.getHiveTable())) return false; + } + if (!getHiveTableId().equals(other.getHiveTableId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasHiveTable()) { + hash = (37 * hash) + HIVE_TABLE_FIELD_NUMBER; + hash = (53 * hash) + getHiveTable().hashCode(); + } + hash = (37 * hash) + HIVE_TABLE_ID_FIELD_NUMBER; + hash = (53 * hash) + getHiveTableId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseFrom( + com.google.protobuf.ByteString data) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, 
input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the CreateHiveTable method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.CreateHiveTableRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.CreateHiveTableRequest) + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveTableRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreateHiveTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest.class, + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetHiveTableFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + hiveTable_ = null; + if (hiveTableBuilder_ != null) { + hiveTableBuilder_.dispose(); + hiveTableBuilder_ = null; + } + hiveTableId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + 
.internal_static_google_cloud_biglake_hive_v1beta_CreateHiveTableRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest build() { + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest result = + new com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.hiveTable_ = hiveTableBuilder_ == null ? 
hiveTable_ : hiveTableBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.hiveTableId_ = hiveTableId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest other) { + if (other == com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasHiveTable()) { + mergeHiveTable(other.getHiveTable()); + } + if (!other.getHiveTableId().isEmpty()) { + hiveTableId_ = other.hiveTableId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetHiveTableFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + hiveTableId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; 
+ } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The parent resource for the table to be created.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The parent resource for the table to be created.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The parent resource for the table to be created.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The parent resource for the table to be created.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The parent resource for the table to be created.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.cloud.biglake.hive.v1beta.HiveTable hiveTable_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveTable, + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder, + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder> + hiveTableBuilder_; + + /** + * + * + *
+     * Required. The Hive Table to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveTable field is set. + */ + public boolean hasHiveTable() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. The Hive Table to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveTable. + */ + public com.google.cloud.biglake.hive.v1beta.HiveTable getHiveTable() { + if (hiveTableBuilder_ == null) { + return hiveTable_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance() + : hiveTable_; + } else { + return hiveTableBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The Hive Table to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveTable(com.google.cloud.biglake.hive.v1beta.HiveTable value) { + if (hiveTableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hiveTable_ = value; + } else { + hiveTableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Hive Table to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveTable( + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder builderForValue) { + if (hiveTableBuilder_ == null) { + hiveTable_ = builderForValue.build(); + } else { + hiveTableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Hive Table to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeHiveTable(com.google.cloud.biglake.hive.v1beta.HiveTable value) { + if (hiveTableBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && hiveTable_ != null + && hiveTable_ != com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance()) { + getHiveTableBuilder().mergeFrom(value); + } else { + hiveTable_ = value; + } + } else { + hiveTableBuilder_.mergeFrom(value); + } + if (hiveTable_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The Hive Table to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearHiveTable() { + bitField0_ = (bitField0_ & ~0x00000002); + hiveTable_ = null; + if (hiveTableBuilder_ != null) { + hiveTableBuilder_.dispose(); + hiveTableBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Hive Table to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveTable.Builder getHiveTableBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetHiveTableFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The Hive Table to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder getHiveTableOrBuilder() { + if (hiveTableBuilder_ != null) { + return hiveTableBuilder_.getMessageOrBuilder(); + } else { + return hiveTable_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance() + : hiveTable_; + } + } + + /** + * + * + *
+     * Required. The Hive Table to create.
+     * The `name` field does not need to be provided.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveTable, + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder, + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder> + internalGetHiveTableFieldBuilder() { + if (hiveTableBuilder_ == null) { + hiveTableBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveTable, + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder, + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder>( + getHiveTable(), getParentForChildren(), isClean()); + hiveTable_ = null; + } + return hiveTableBuilder_; + } + + private java.lang.Object hiveTableId_ = ""; + + /** + * + * + *
+     * Required. The Hive Table ID to use for the table that will become the final
+     * component of the table's resource name. The maximum length is 256
+     * characters.
+     * 
+ * + * string hive_table_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The hiveTableId. + */ + public java.lang.String getHiveTableId() { + java.lang.Object ref = hiveTableId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + hiveTableId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The Hive Table ID to use for the table that will become the final
+     * component of the table's resource name. The maximum length is 256
+     * characters.
+     * 
+ * + * string hive_table_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for hiveTableId. + */ + public com.google.protobuf.ByteString getHiveTableIdBytes() { + java.lang.Object ref = hiveTableId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + hiveTableId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The Hive Table ID to use for the table that will become the final
+     * component of the table's resource name. The maximum length is 256
+     * characters.
+     * 
+ * + * string hive_table_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The hiveTableId to set. + * @return This builder for chaining. + */ + public Builder setHiveTableId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + hiveTableId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Hive Table ID to use for the table that will become the final
+     * component of the table's resource name. The maximum length is 256
+     * characters.
+     * 
+ * + * string hive_table_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearHiveTableId() { + hiveTableId_ = getDefaultInstance().getHiveTableId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Hive Table ID to use for the table that will become the final
+     * component of the table's resource name. The maximum length is 256
+     * characters.
+     * 
+ * + * string hive_table_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for hiveTableId to set. + * @return This builder for chaining. + */ + public Builder setHiveTableIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + hiveTableId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.CreateHiveTableRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.CreateHiveTableRequest) + private static final com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateHiveTableRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + 
public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveTableRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveTableRequestOrBuilder.java new file mode 100644 index 000000000000..deeb474dbd80 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreateHiveTableRequestOrBuilder.java @@ -0,0 +1,138 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface CreateHiveTableRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.CreateHiveTableRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The parent resource for the table to be created.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The parent resource for the table to be created.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The Hive Table to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveTable field is set. + */ + boolean hasHiveTable(); + + /** + * + * + *
+   * Required. The Hive Table to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveTable. + */ + com.google.cloud.biglake.hive.v1beta.HiveTable getHiveTable(); + + /** + * + * + *
+   * Required. The Hive Table to create.
+   * The `name` field does not need to be provided.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder getHiveTableOrBuilder(); + + /** + * + * + *
+   * Required. The Hive Table ID to use for the table that will become the final
+   * component of the table's resource name. The maximum length is 256
+   * characters.
+   * 
+ * + * string hive_table_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The hiveTableId. + */ + java.lang.String getHiveTableId(); + + /** + * + * + *
+   * Required. The Hive Table ID to use for the table that will become the final
+   * component of the table's resource name. The maximum length is 256
+   * characters.
+   * 
+ * + * string hive_table_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for hiveTableId. + */ + com.google.protobuf.ByteString getHiveTableIdBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreatePartitionRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreatePartitionRequest.java new file mode 100644 index 000000000000..d4185f001855 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreatePartitionRequest.java @@ -0,0 +1,946 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for CreatePartition. The Partition is
+ * uniquely identified by values, which is an ordered list. Hence, there is no
+ * separate name or partition id field.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.CreatePartitionRequest} + */ +@com.google.protobuf.Generated +public final class CreatePartitionRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.CreatePartitionRequest) + CreatePartitionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreatePartitionRequest"); + } + + // Use CreatePartitionRequest.newBuilder() to construct. + private CreatePartitionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreatePartitionRequest() { + parent_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreatePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreatePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.class, + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to where the partition to be added, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{databases}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to where the partition to be added, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{databases}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARTITION_FIELD_NUMBER = 2; + private com.google.cloud.biglake.hive.v1beta.Partition partition_; + + /** + * + * + *
+   * Required. The partition to be added.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the partition field is set. + */ + @java.lang.Override + public boolean hasPartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The partition to be added.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The partition. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.Partition getPartition() { + return partition_ == null + ? com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance() + : partition_; + } + + /** + * + * + *
+   * Required. The partition to be added.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionOrBuilder() { + return partition_ == null + ? com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance() + : partition_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getPartition()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getPartition()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest other = + (com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if 
(hasPartition() != other.hasPartition()) return false; + if (hasPartition()) { + if (!getPartition().equals(other.getPartition())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasPartition()) { + hash = (37 * hash) + PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getPartition().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public 
static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); 
+ } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for CreatePartition. The Partition is
+   * uniquely identified by values, which is an ordered list. Hence, there is no
+   * separate name or partition id field.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.CreatePartitionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.CreatePartitionRequest) + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreatePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_CreatePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.class, + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetPartitionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + partition_ = null; + if (partitionBuilder_ != null) { + partitionBuilder_.dispose(); + partitionBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + 
.internal_static_google_cloud_biglake_hive_v1beta_CreatePartitionRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest build() { + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest result = + new com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.partition_ = partitionBuilder_ == null ? 
partition_ : partitionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest other) { + if (other == com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasPartition()) { + mergePartition(other.getPartition()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetPartitionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + 
} // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to where the partition to be added, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to where the partition to be added, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to where the partition to be added, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to where the partition to be added, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to where the partition to be added, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.cloud.biglake.hive.v1beta.Partition partition_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder> + partitionBuilder_; + + /** + * + * + *
+     * Required. The partition to be added.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the partition field is set. + */ + public boolean hasPartition() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. The partition to be added.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The partition. + */ + public com.google.cloud.biglake.hive.v1beta.Partition getPartition() { + if (partitionBuilder_ == null) { + return partition_ == null + ? com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance() + : partition_; + } else { + return partitionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The partition to be added.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartition(com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + partition_ = value; + } else { + partitionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The partition to be added.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartition( + com.google.cloud.biglake.hive.v1beta.Partition.Builder builderForValue) { + if (partitionBuilder_ == null) { + partition_ = builderForValue.build(); + } else { + partitionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The partition to be added.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergePartition(com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && partition_ != null + && partition_ != com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance()) { + getPartitionBuilder().mergeFrom(value); + } else { + partition_ = value; + } + } else { + partitionBuilder_.mergeFrom(value); + } + if (partition_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The partition to be added.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearPartition() { + bitField0_ = (bitField0_ & ~0x00000002); + partition_ = null; + if (partitionBuilder_ != null) { + partitionBuilder_.dispose(); + partitionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The partition to be added.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.Partition.Builder getPartitionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetPartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The partition to be added.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionOrBuilder() { + if (partitionBuilder_ != null) { + return partitionBuilder_.getMessageOrBuilder(); + } else { + return partition_ == null + ? com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance() + : partition_; + } + } + + /** + * + * + *
+     * Required. The partition to be added.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder> + internalGetPartitionFieldBuilder() { + if (partitionBuilder_ == null) { + partitionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder>( + getPartition(), getParentForChildren(), isClean()); + partition_ = null; + } + return partitionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.CreatePartitionRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.CreatePartitionRequest) + private static final com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreatePartitionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreatePartitionRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreatePartitionRequestOrBuilder.java new file mode 100644 index 000000000000..d70ef343312f --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/CreatePartitionRequestOrBuilder.java @@ -0,0 +1,105 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface CreatePartitionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.CreatePartitionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to where the partition to be added, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{databases}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to where the partition to be added, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{databases}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The partition to be added.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the partition field is set. + */ + boolean hasPartition(); + + /** + * + * + *
+   * Required. The partition to be added.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The partition. + */ + com.google.cloud.biglake.hive.v1beta.Partition getPartition(); + + /** + * + * + *
+   * Required. The partition to be added.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionOrBuilder(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveCatalogRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveCatalogRequest.java new file mode 100644 index 000000000000..9be437337d3f --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveCatalogRequest.java @@ -0,0 +1,629 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the DeleteHiveCatalog method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest} + */ +@com.google.protobuf.Generated +public final class DeleteHiveCatalogRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest) + DeleteHiveCatalogRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteHiveCatalogRequest"); + } + + // Use DeleteHiveCatalogRequest.newBuilder() to construct. + private DeleteHiveCatalogRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteHiveCatalogRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveCatalogRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveCatalogRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest.class, + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the catalog to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the catalog to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest other = + (com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; 
+ } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest 
parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the DeleteHiveCatalog method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest) + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveCatalogRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveCatalogRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest.class, + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveCatalogRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest build() { + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest result = + new com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest other) { + if (other + == com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = 
input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the catalog to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the catalog to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the catalog to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the catalog to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the catalog to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest) + private static final com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteHiveCatalogRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return 
PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveCatalogRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveCatalogRequestOrBuilder.java new file mode 100644 index 000000000000..6b803d02397f --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveCatalogRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface DeleteHiveCatalogRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the catalog to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the catalog to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveDatabaseRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveDatabaseRequest.java new file mode 100644 index 000000000000..d7db9b0077c1 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveDatabaseRequest.java @@ -0,0 +1,631 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the DeleteHiveDatabase method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest} + */ +@com.google.protobuf.Generated +public final class DeleteHiveDatabaseRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest) + DeleteHiveDatabaseRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteHiveDatabaseRequest"); + } + + // Use DeleteHiveDatabaseRequest.newBuilder() to construct. + private DeleteHiveDatabaseRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteHiveDatabaseRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest.class, + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the database to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the database to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest other = + (com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return 
true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the DeleteHiveDatabase method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest) + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest.class, + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest build() { + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest result = + new com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest other) { + if (other + == com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + 
int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the database to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the database to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the database to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the database to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the database to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest) + private static final com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteHiveDatabaseRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + 
return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveDatabaseRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveDatabaseRequestOrBuilder.java new file mode 100644 index 000000000000..b00bf7d5c2ee --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveDatabaseRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface DeleteHiveDatabaseRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the database to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the database to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveTableRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveTableRequest.java new file mode 100644 index 000000000000..d093a391e95f --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveTableRequest.java @@ -0,0 +1,625 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the DeleteHiveTable method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest} + */ +@com.google.protobuf.Generated +public final class DeleteHiveTableRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest) + DeleteHiveTableRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteHiveTableRequest"); + } + + // Use DeleteHiveTableRequest.newBuilder() to construct. + private DeleteHiveTableRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteHiveTableRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveTableRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest.class, + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the table to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the table to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest other = + (com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + 
+ @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseFrom( + 
java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + 
@java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the DeleteHiveTable method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest) + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveTableRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest.class, + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveTableRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest 
build() { + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest result = + new com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest other) { + if (other == com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + 
name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the table to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the table to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the table to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the table to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the table to delete.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest) + private static final com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteHiveTableRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveTableRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveTableRequestOrBuilder.java new file mode 100644 index 000000000000..8de83275eed2 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/DeleteHiveTableRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface DeleteHiveTableRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the table to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the table to delete.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/FieldSchema.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/FieldSchema.java new file mode 100644 index 000000000000..4638985d8c37 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/FieldSchema.java @@ -0,0 +1,974 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Field schema information.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.FieldSchema} + */ +@com.google.protobuf.Generated +public final class FieldSchema extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.FieldSchema) + FieldSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "FieldSchema"); + } + + // Use FieldSchema.newBuilder() to construct. + private FieldSchema(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private FieldSchema() { + name_ = ""; + type_ = ""; + comment_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_FieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_FieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.FieldSchema.class, + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the field. The maximum length is 767 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the field. The maximum length is 767 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object type_ = ""; + + /** + * + * + *
+   * Required. Type of the field. The maximum length is 128 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The type. + */ + @java.lang.Override + public java.lang.String getType() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Type of the field. The maximum length is 128 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for type. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COMMENT_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object comment_ = ""; + + /** + * + * + *
+   * Optional. Comment of the field. The maximum length is 256 characters.
+   * 
+ * + * string comment = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The comment. + */ + @java.lang.Override + public java.lang.String getComment() { + java.lang.Object ref = comment_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + comment_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Comment of the field. The maximum length is 256 characters.
+   * 
+ * + * string comment = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for comment. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCommentBytes() { + java.lang.Object ref = comment_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + comment_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(type_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, type_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(comment_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, comment_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(type_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, type_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(comment_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, comment_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize 
= size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.FieldSchema)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.FieldSchema other = + (com.google.cloud.biglake.hive.v1beta.FieldSchema) obj; + + if (!getName().equals(other.getName())) return false; + if (!getType().equals(other.getType())) return false; + if (!getComment().equals(other.getComment())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + hash = (37 * hash) + COMMENT_FIELD_NUMBER; + hash = (53 * hash) + getComment().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema parseFrom( + com.google.protobuf.ByteString 
data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public 
static com.google.cloud.biglake.hive.v1beta.FieldSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.biglake.hive.v1beta.FieldSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Field schema information.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.FieldSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.FieldSchema) + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_FieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_FieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.FieldSchema.class, + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.FieldSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = ""; + comment_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_FieldSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.FieldSchema getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.FieldSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.FieldSchema build() { + com.google.cloud.biglake.hive.v1beta.FieldSchema result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.FieldSchema buildPartial() { + com.google.cloud.biglake.hive.v1beta.FieldSchema result = + new com.google.cloud.biglake.hive.v1beta.FieldSchema(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.FieldSchema result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = type_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.comment_ = comment_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.FieldSchema) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.FieldSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.FieldSchema other) { + if (other == com.google.cloud.biglake.hive.v1beta.FieldSchema.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getType().isEmpty()) { + type_ = other.type_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getComment().isEmpty()) { + comment_ = other.comment_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new 
java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + type_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + comment_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the field. The maximum length is 767 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the field. The maximum length is 767 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the field. The maximum length is 767 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the field. The maximum length is 767 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the field. The maximum length is 767 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object type_ = ""; + + /** + * + * + *
+     * Required. Type of the field. The maximum length is 128 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The type. + */ + public java.lang.String getType() { + java.lang.Object ref = type_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Type of the field. The maximum length is 128 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for type. + */ + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Type of the field. The maximum length is 128 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Type of the field. The maximum length is 128 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearType() { + type_ = getDefaultInstance().getType(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Type of the field. The maximum length is 128 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for type to set. + * @return This builder for chaining. + */ + public Builder setTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object comment_ = ""; + + /** + * + * + *
+     * Optional. Comment of the field. The maximum length is 256 characters.
+     * 
+ * + * string comment = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The comment. + */ + public java.lang.String getComment() { + java.lang.Object ref = comment_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + comment_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Comment of the field. The maximum length is 256 characters.
+     * 
+ * + * string comment = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for comment. + */ + public com.google.protobuf.ByteString getCommentBytes() { + java.lang.Object ref = comment_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + comment_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Comment of the field. The maximum length is 256 characters.
+     * 
+ * + * string comment = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The comment to set. + * @return This builder for chaining. + */ + public Builder setComment(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + comment_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Comment of the field. The maximum length is 256 characters.
+     * 
+ * + * string comment = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearComment() { + comment_ = getDefaultInstance().getComment(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Comment of the field. The maximum length is 256 characters.
+     * 
+ * + * string comment = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for comment to set. + * @return This builder for chaining. + */ + public Builder setCommentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + comment_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.FieldSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.FieldSchema) + private static final com.google.cloud.biglake.hive.v1beta.FieldSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.FieldSchema(); + } + + public static com.google.cloud.biglake.hive.v1beta.FieldSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FieldSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + 
@java.lang.Override + public com.google.cloud.biglake.hive.v1beta.FieldSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/FieldSchemaOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/FieldSchemaOrBuilder.java new file mode 100644 index 000000000000..5dd9d6bcb29e --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/FieldSchemaOrBuilder.java @@ -0,0 +1,106 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface FieldSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.FieldSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the field. The maximum length is 767 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the field. The maximum length is 767 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Required. Type of the field. The maximum length is 128 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The type. + */ + java.lang.String getType(); + + /** + * + * + *
+   * Required. Type of the field. The maximum length is 128 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for type. + */ + com.google.protobuf.ByteString getTypeBytes(); + + /** + * + * + *
+   * Optional. Comment of the field. The maximum length is 256 characters.
+   * 
+ * + * string comment = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The comment. + */ + java.lang.String getComment(); + + /** + * + * + *
+   * Optional. Comment of the field. The maximum length is 256 characters.
+   * 
+ * + * string comment = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for comment. + */ + com.google.protobuf.ByteString getCommentBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveCatalogRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveCatalogRequest.java new file mode 100644 index 000000000000..fbda9d066524 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveCatalogRequest.java @@ -0,0 +1,625 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the GetHiveCatalog method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest} + */ +@com.google.protobuf.Generated +public final class GetHiveCatalogRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest) + GetHiveCatalogRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetHiveCatalogRequest"); + } + + // Use GetHiveCatalogRequest.newBuilder() to construct. + private GetHiveCatalogRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetHiveCatalogRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveCatalogRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveCatalogRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest.class, + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the catalog to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the catalog to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest other = + (com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseFrom( + java.io.InputStream 
input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public 
Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the GetHiveCatalog method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest) + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveCatalogRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveCatalogRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest.class, + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveCatalogRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest build() { + 
com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest result = + new com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest other) { + if (other == com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the catalog to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the catalog to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the catalog to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the catalog to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the catalog to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest) + private static final com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetHiveCatalogRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveCatalogRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveCatalogRequestOrBuilder.java new file mode 100644 index 000000000000..4f5ac8960c78 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveCatalogRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface GetHiveCatalogRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the catalog to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the catalog to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveDatabaseRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveDatabaseRequest.java new file mode 100644 index 000000000000..451ea187fc0d --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveDatabaseRequest.java @@ -0,0 +1,625 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the GetHiveDatabase method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest} + */ +@com.google.protobuf.Generated +public final class GetHiveDatabaseRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest) + GetHiveDatabaseRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetHiveDatabaseRequest"); + } + + // Use GetHiveDatabaseRequest.newBuilder() to construct. + private GetHiveDatabaseRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetHiveDatabaseRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest.class, + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the database to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the database to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest other = + (com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + 
+ @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseFrom( + 
java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + 
@java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the GetHiveDatabase method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest) + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest.class, + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest 
build() { + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest result = + new com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest other) { + if (other == com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + 
name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the database to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the database to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the database to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the database to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the database to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest) + private static final com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetHiveDatabaseRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveDatabaseRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveDatabaseRequestOrBuilder.java new file mode 100644 index 000000000000..d8e8125f6289 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveDatabaseRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface GetHiveDatabaseRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the database to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the database to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveTableRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveTableRequest.java new file mode 100644 index 000000000000..7e8e846b09ec --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveTableRequest.java @@ -0,0 +1,625 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the GetHiveTable method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.GetHiveTableRequest} + */ +@com.google.protobuf.Generated +public final class GetHiveTableRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.GetHiveTableRequest) + GetHiveTableRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetHiveTableRequest"); + } + + // Use GetHiveTableRequest.newBuilder() to construct. + private GetHiveTableRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetHiveTableRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveTableRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest.class, + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the table to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the table to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest other = + (com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseFrom( + java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + 
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the GetHiveTable method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.GetHiveTableRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.GetHiveTableRequest) + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveTableRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest.class, + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_GetHiveTableRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest build() { + 
com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest result = + new com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest other) { + if (other == com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + 
bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the table to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the table to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the table to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the table to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the table to retrieve.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.GetHiveTableRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.GetHiveTableRequest) + private static final com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetHiveTableRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + 
public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveTableRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveTableRequestOrBuilder.java new file mode 100644 index 000000000000..efb84378f3fe --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/GetHiveTableRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface GetHiveTableRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.GetHiveTableRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the table to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the table to retrieve.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveCatalog.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveCatalog.java new file mode 100644 index 000000000000..cdb798364f87 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveCatalog.java @@ -0,0 +1,2623 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * The HiveCatalog contains spark/hive databases and tables in the BigLake
+ * Metastore. While creating resources under a catalog, ideally ensure that the
+ * storage bucket location, spark / hive engine location or any other compute
+ * location  match. Catalog can be viewed as the destination for migrating an
+ * on-prem Hive metastore to GCP.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.HiveCatalog} + */ +@com.google.protobuf.Generated +public final class HiveCatalog extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.HiveCatalog) + HiveCatalogOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "HiveCatalog"); + } + + // Use HiveCatalog.newBuilder() to construct. + private HiveCatalog(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private HiveCatalog() { + name_ = ""; + description_ = ""; + locationUri_ = ""; + replicas_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.class, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder.class); + } + + public interface ReplicaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.HiveCatalog.Replica) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Output only. The region of the replica. For example `us-east1`.
+     * 
+ * + * string region = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The region. + */ + java.lang.String getRegion(); + + /** + * + * + *
+     * Output only. The region of the replica. For example `us-east1`.
+     * 
+ * + * string region = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for region. + */ + com.google.protobuf.ByteString getRegionBytes(); + + /** + * + * + *
+     * Output only. The current state of the replica.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + int getStateValue(); + + /** + * + * + *
+     * Output only. The current state of the replica.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State getState(); + } + + /** + * + * + *
+   * The replica of the Catalog.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.HiveCatalog.Replica} + */ + public static final class Replica extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.HiveCatalog.Replica) + ReplicaOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Replica"); + } + + // Use Replica.newBuilder() to construct. + private Replica(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Replica() { + region_ = ""; + state_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_Replica_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_Replica_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.class, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.Builder.class); + } + + /** + * + * + *
+     * If the catalog is replicated to multiple regions, this enum describes the
+     * current state of the replica.
+     * 
+ * + * Protobuf enum {@code google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State} + */ + public enum State implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+       * The replica state is unknown.
+       * 
+ * + * STATE_UNSPECIFIED = 0; + */ + STATE_UNSPECIFIED(0), + /** + * + * + *
+       * Indicates the replica is the writable primary.
+       * 
+ * + * STATE_PRIMARY = 1; + */ + STATE_PRIMARY(1), + /** + * + * + *
+       * Indicates the replica has been recently assigned as the primary, but
+       * not all databases are writeable yet.
+       * 
+ * + * STATE_PRIMARY_IN_PROGRESS = 2; + */ + STATE_PRIMARY_IN_PROGRESS(2), + /** + * + * + *
+       * Indicates the replica is a read-only secondary replica.
+       * 
+ * + * STATE_SECONDARY = 3; + */ + STATE_SECONDARY(3), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "State"); + } + + /** + * + * + *
+       * The replica state is unknown.
+       * 
+ * + * STATE_UNSPECIFIED = 0; + */ + public static final int STATE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+       * Indicates the replica is the writable primary.
+       * 
+ * + * STATE_PRIMARY = 1; + */ + public static final int STATE_PRIMARY_VALUE = 1; + + /** + * + * + *
+       * Indicates the replica has been recently assigned as the primary, but
+       * not all databases are writeable yet.
+       * 
+ * + * STATE_PRIMARY_IN_PROGRESS = 2; + */ + public static final int STATE_PRIMARY_IN_PROGRESS_VALUE = 2; + + /** + * + * + *
+       * Indicates the replica is a read-only secondary replica.
+       * 
+ * + * STATE_SECONDARY = 3; + */ + public static final int STATE_SECONDARY_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static State forNumber(int value) { + switch (value) { + case 0: + return STATE_UNSPECIFIED; + case 1: + return STATE_PRIMARY; + case 2: + return STATE_PRIMARY_IN_PROGRESS; + case 3: + return STATE_SECONDARY; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final State[] VALUES = values(); + + public 
static State valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State) + } + + public static final int REGION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object region_ = ""; + + /** + * + * + *
+     * Output only. The region of the replica. For example `us-east1`.
+     * 
+ * + * string region = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The region. + */ + @java.lang.Override + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + + /** + * + * + *
+     * Output only. The region of the replica. For example `us-east1`.
+     * 
+ * + * string region = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for region. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATE_FIELD_NUMBER = 2; + private int state_ = 0; + + /** + * + * + *
+     * Output only. The current state of the replica.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
+     * Output only. The current state of the replica.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State getState() { + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State result = + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State.forNumber(state_); + return result == null + ? com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(region_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, region_); + } + if (state_ + != com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State.STATE_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, state_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(region_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, region_); + } + if (state_ + != com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State.STATE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, state_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj 
instanceof com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica other = + (com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica) obj; + + if (!getRegion().equals(other.getRegion())) return false; + if (state_ != other.state_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * The replica of the Catalog.
+     * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.HiveCatalog.Replica} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.HiveCatalog.Replica) + com.google.cloud.biglake.hive.v1beta.HiveCatalog.ReplicaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_Replica_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_Replica_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.class, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + region_ = ""; + state_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_Replica_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica build() { + 
com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica buildPartial() { + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica result = + new com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.region_ = region_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.state_ = state_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica other) { + if (other == com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.getDefaultInstance()) + return this; + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = 
false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + region_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + state_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object region_ = ""; + + /** + * + * + *
+       * Output only. The region of the replica. For example `us-east1`.
+       * 
+ * + * string region = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The region. + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Output only. The region of the replica. For example `us-east1`.
+       * 
+ * + * string region = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for region. + */ + public com.google.protobuf.ByteString getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Output only. The region of the replica. For example `us-east1`.
+       * 
+ * + * string region = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The region to set. + * @return This builder for chaining. + */ + public Builder setRegion(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. The region of the replica. For example `us-east1`.
+       * 
+ * + * string region = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearRegion() { + region_ = getDefaultInstance().getRegion(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. The region of the replica. For example `us-east1`.
+       * 
+ * + * string region = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for region to set. + * @return This builder for chaining. + */ + public Builder setRegionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + region_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int state_ = 0; + + /** + * + * + *
+       * Output only. The current state of the replica.
+       * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
+       * Output only. The current state of the replica.
+       * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for state to set. + * @return This builder for chaining. + */ + public Builder setStateValue(int value) { + state_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. The current state of the replica.
+       * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State getState() { + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State result = + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State.forNumber(state_); + return result == null + ? com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State.UNRECOGNIZED + : result; + } + + /** + * + * + *
+       * Output only. The current state of the replica.
+       * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The state to set. + * @return This builder for chaining. + */ + public Builder setState( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + state_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. The current state of the replica.
+       * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000002); + state_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.HiveCatalog.Replica) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.HiveCatalog.Replica) + private static final com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica(); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Replica parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESCRIPTION_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object description_ = ""; + + /** + * + * + *
+   * Optional. Stores the catalog description.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + @java.lang.Override + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Stores the catalog description.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LOCATION_URI_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object locationUri_ = ""; + + /** + * + * + *
+   * Required. The Cloud Storage location path where the catalog exists.
+   * Format: gs://bucket/path/to/catalog
+   * The maximum length is 4000 characters.
+   * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The locationUri. + */ + @java.lang.Override + public java.lang.String getLocationUri() { + java.lang.Object ref = locationUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationUri_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The Cloud Storage location path where the catalog exists.
+   * Format: gs://bucket/path/to/catalog
+   * The maximum length is 4000 characters.
+   * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for locationUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationUriBytes() { + java.lang.Object ref = locationUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REPLICAS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List replicas_; + + /** + * + * + *
+   * Output only. The replicas for the catalog metadata.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getReplicasList() { + return replicas_; + } + + /** + * + * + *
+   * Output only. The replicas for the catalog metadata.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getReplicasOrBuilderList() { + return replicas_; + } + + /** + * + * + *
+   * Output only. The replicas for the catalog metadata.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getReplicasCount() { + return replicas_.size(); + } + + /** + * + * + *
+   * Output only. The replicas for the catalog metadata.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica getReplicas(int index) { + return replicas_.get(index); + } + + /** + * + * + *
+   * Output only. The replicas for the catalog metadata.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.ReplicaOrBuilder getReplicasOrBuilder( + int index) { + return replicas_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(description_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, description_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(locationUri_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, locationUri_); + } + for (int i = 0; i < replicas_.size(); i++) { + output.writeMessage(4, replicas_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(description_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, description_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(locationUri_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, locationUri_); + } + for (int i = 0; i < replicas_.size(); i++) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(4, replicas_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.HiveCatalog)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.HiveCatalog other = + (com.google.cloud.biglake.hive.v1beta.HiveCatalog) obj; + + if (!getName().equals(other.getName())) return false; + if (!getDescription().equals(other.getDescription())) return false; + if (!getLocationUri().equals(other.getLocationUri())) return false; + if (!getReplicasList().equals(other.getReplicasList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER; + hash = (53 * hash) + getDescription().hashCode(); + hash = (37 * hash) + LOCATION_URI_FIELD_NUMBER; + hash = (53 * hash) + getLocationUri().hashCode(); + if (getReplicasCount() > 0) { + hash = (37 * hash) + REPLICAS_FIELD_NUMBER; + hash = (53 * hash) + getReplicasList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.biglake.hive.v1beta.HiveCatalog prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * The HiveCatalog contains spark/hive databases and tables in the BigLake
+   * Metastore. While creating resources under a catalog, ideally ensure that the
+   * storage bucket location, spark / hive engine location or any other compute
+   * location  match. Catalog can be viewed as the destination for migrating an
+   * on-prem Hive metastore to GCP.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.HiveCatalog} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.HiveCatalog) + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.class, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.HiveCatalog.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + description_ = ""; + locationUri_ = ""; + if (replicasBuilder_ == null) { + replicas_ = java.util.Collections.emptyList(); + } else { + replicas_ = null; + replicasBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog getDefaultInstanceForType() { + return 
com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog build() { + com.google.cloud.biglake.hive.v1beta.HiveCatalog result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog buildPartial() { + com.google.cloud.biglake.hive.v1beta.HiveCatalog result = + new com.google.cloud.biglake.hive.v1beta.HiveCatalog(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.HiveCatalog result) { + if (replicasBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + replicas_ = java.util.Collections.unmodifiableList(replicas_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.replicas_ = replicas_; + } else { + result.replicas_ = replicasBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.HiveCatalog result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.description_ = description_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.locationUri_ = locationUri_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.HiveCatalog) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.HiveCatalog) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.HiveCatalog other) { + if (other == com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + 
name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getDescription().isEmpty()) { + description_ = other.description_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getLocationUri().isEmpty()) { + locationUri_ = other.locationUri_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (replicasBuilder_ == null) { + if (!other.replicas_.isEmpty()) { + if (replicas_.isEmpty()) { + replicas_ = other.replicas_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureReplicasIsMutable(); + replicas_.addAll(other.replicas_); + } + onChanged(); + } + } else { + if (!other.replicas_.isEmpty()) { + if (replicasBuilder_.isEmpty()) { + replicasBuilder_.dispose(); + replicasBuilder_ = null; + replicas_ = other.replicas_; + bitField0_ = (bitField0_ & ~0x00000008); + replicasBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetReplicasFieldBuilder() + : null; + } else { + replicasBuilder_.addAllMessages(other.replicas_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + description_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + locationUri_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + 
com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.parser(), + extensionRegistry); + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.add(m); + } else { + replicasBuilder_.addMessage(m); + } + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object description_ = ""; + + /** + * + * + *
+     * Optional. Stores the catalog description.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Stores the catalog description.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Stores the catalog description.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The description to set. + * @return This builder for chaining. + */ + public Builder setDescription(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + description_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Stores the catalog description.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDescription() { + description_ = getDefaultInstance().getDescription(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Stores the catalog description.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for description to set. + * @return This builder for chaining. + */ + public Builder setDescriptionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + description_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object locationUri_ = ""; + + /** + * + * + *
+     * Required. The Cloud Storage location path where the catalog exists.
+     * Format: gs://bucket/path/to/catalog
+     * The maximum length is 4000 characters.
+     * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The locationUri. + */ + public java.lang.String getLocationUri() { + java.lang.Object ref = locationUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The Cloud Storage location path where the catalog exists.
+     * Format: gs://bucket/path/to/catalog
+     * The maximum length is 4000 characters.
+     * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for locationUri. + */ + public com.google.protobuf.ByteString getLocationUriBytes() { + java.lang.Object ref = locationUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The Cloud Storage location path where the catalog exists.
+     * Format: gs://bucket/path/to/catalog
+     * The maximum length is 4000 characters.
+     * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The locationUri to set. + * @return This builder for chaining. + */ + public Builder setLocationUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + locationUri_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Cloud Storage location path where the catalog exists.
+     * Format: gs://bucket/path/to/catalog
+     * The maximum length is 4000 characters.
+     * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearLocationUri() { + locationUri_ = getDefaultInstance().getLocationUri(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Cloud Storage location path where the catalog exists.
+     * Format: gs://bucket/path/to/catalog
+     * The maximum length is 4000 characters.
+     * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for locationUri to set. + * @return This builder for chaining. + */ + public Builder setLocationUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + locationUri_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.util.List replicas_ = + java.util.Collections.emptyList(); + + private void ensureReplicasIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + replicas_ = + new java.util.ArrayList( + replicas_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.ReplicaOrBuilder> + replicasBuilder_; + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getReplicasList() { + if (replicasBuilder_ == null) { + return java.util.Collections.unmodifiableList(replicas_); + } else { + return replicasBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getReplicasCount() { + if (replicasBuilder_ == null) { + return replicas_.size(); + } else { + return replicasBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica getReplicas(int index) { + if (replicasBuilder_ == null) { + return replicas_.get(index); + } else { + return replicasBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setReplicas( + int index, com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica value) { + if (replicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicasIsMutable(); + replicas_.set(index, value); + onChanged(); + } else { + replicasBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setReplicas( + int index, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.Builder builderForValue) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.set(index, builderForValue.build()); + onChanged(); + } else { + replicasBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addReplicas(com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica value) { + if (replicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicasIsMutable(); + replicas_.add(value); + onChanged(); + } else { + replicasBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addReplicas( + int index, com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica value) { + if (replicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicasIsMutable(); + replicas_.add(index, value); + onChanged(); + } else { + replicasBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addReplicas( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.Builder builderForValue) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.add(builderForValue.build()); + onChanged(); + } else { + replicasBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addReplicas( + int index, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.Builder builderForValue) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.add(index, builderForValue.build()); + onChanged(); + } else { + replicasBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllReplicas( + java.lang.Iterable + values) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, replicas_); + onChanged(); + } else { + replicasBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearReplicas() { + if (replicasBuilder_ == null) { + replicas_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + replicasBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeReplicas(int index) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.remove(index); + onChanged(); + } else { + replicasBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.Builder getReplicasBuilder( + int index) { + return internalGetReplicasFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.ReplicaOrBuilder getReplicasOrBuilder( + int index) { + if (replicasBuilder_ == null) { + return replicas_.get(index); + } else { + return replicasBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List< + ? extends com.google.cloud.biglake.hive.v1beta.HiveCatalog.ReplicaOrBuilder> + getReplicasOrBuilderList() { + if (replicasBuilder_ != null) { + return replicasBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(replicas_); + } + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.Builder addReplicasBuilder() { + return internalGetReplicasFieldBuilder() + .addBuilder( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.Builder addReplicasBuilder( + int index) { + return internalGetReplicasFieldBuilder() + .addBuilder( + index, com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. The replicas for the catalog metadata.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getReplicasBuilderList() { + return internalGetReplicasFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.ReplicaOrBuilder> + internalGetReplicasFieldBuilder() { + if (replicasBuilder_ == null) { + replicasBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.ReplicaOrBuilder>( + replicas_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); + replicas_ = null; + } + return replicasBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.HiveCatalog) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.HiveCatalog) + private static final com.google.cloud.biglake.hive.v1beta.HiveCatalog DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.HiveCatalog(); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveCatalog getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public HiveCatalog parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveCatalogOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveCatalogOrBuilder.java new file mode 100644 index 000000000000..d889e77d3671 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveCatalogOrBuilder.java @@ -0,0 +1,186 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface HiveCatalogOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.HiveCatalog) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Optional. Stores the catalog description.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + java.lang.String getDescription(); + + /** + * + * + *
+   * Optional. Stores the catalog description.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + com.google.protobuf.ByteString getDescriptionBytes(); + + /** + * + * + *
+   * Required. The Cloud Storage location path where the catalog exists.
+   * Format: gs://bucket/path/to/catalog
+   * The maximum length is 4000 characters.
+   * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The locationUri. + */ + java.lang.String getLocationUri(); + + /** + * + * + *
+   * Required. The Cloud Storage location path where the catalog exists.
+   * Format: gs://bucket/path/to/catalog
+   * The maximum length is 4000 characters.
+   * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for locationUri. + */ + com.google.protobuf.ByteString getLocationUriBytes(); + + /** + * + * + *
+   * Output only. The replicas for the catalog metadata.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List getReplicasList(); + + /** + * + * + *
+   * Output only. The replicas for the catalog metadata.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Replica getReplicas(int index); + + /** + * + * + *
+   * Output only. The replicas for the catalog metadata.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getReplicasCount(); + + /** + * + * + *
+   * Output only. The replicas for the catalog metadata.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getReplicasOrBuilderList(); + + /** + * + * + *
+   * Output only. The replicas for the catalog metadata.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog.Replica replicas = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveCatalog.ReplicaOrBuilder getReplicasOrBuilder(int index); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveDatabase.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveDatabase.java new file mode 100644 index 000000000000..5f0182661be2 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveDatabase.java @@ -0,0 +1,1406 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Stores the hive database information. It includes the database name,
+ * description, location and properties associated with the database.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.HiveDatabase} + */ +@com.google.protobuf.Generated +public final class HiveDatabase extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.HiveDatabase) + HiveDatabaseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "HiveDatabase"); + } + + // Use HiveDatabase.newBuilder() to construct. + private HiveDatabase(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private HiveDatabase() { + name_ = ""; + description_ = ""; + locationUri_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.HiveDatabase.class, + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESCRIPTION_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object description_ = ""; + + /** + * + * + *
+   * Optional. Stores the database description.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + @java.lang.Override + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Stores the database description.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LOCATION_URI_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object locationUri_ = ""; + + /** + * + * + *
+   * Optional. The Cloud Storage location path where the database exists.
+   * Format: `gs://bucket/path/to/database`
+   * If unspecified, the database will be stored in the catalog location.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + @java.lang.Override + public java.lang.String getLocationUri() { + java.lang.Object ref = locationUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationUri_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The Cloud Storage location path where the database exists.
+   * Format: `gs://bucket/path/to/database`
+   * If unspecified, the database will be stored in the catalog location.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationUriBytes() { + java.lang.Object ref = locationUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARAMETERS_FIELD_NUMBER = 4; + + private static final class ParametersDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_ParametersEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField(ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+   * Optional. Stores the properties associated with the database.
+   * The maximum size is 2 MiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+   * Optional. Stores the properties associated with the database.
+   * The maximum size is 2 MiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+   * Optional. Stores the properties associated with the database.
+   * The maximum size is 2 MiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * Optional. Stores the properties associated with the database.
+   * The maximum size is 2 MiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(description_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, description_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(locationUri_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, locationUri_); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetParameters(), ParametersDefaultEntryHolder.defaultEntry, 4); + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(description_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, description_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(locationUri_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(3, locationUri_); + } + for (java.util.Map.Entry entry : + internalGetParameters().getMap().entrySet()) { + com.google.protobuf.MapEntry parameters__ = + ParametersDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, parameters__); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.HiveDatabase)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.HiveDatabase other = + (com.google.cloud.biglake.hive.v1beta.HiveDatabase) obj; + + if (!getName().equals(other.getName())) return false; + if (!getDescription().equals(other.getDescription())) return false; + if (!getLocationUri().equals(other.getLocationUri())) return false; + if (!internalGetParameters().equals(other.internalGetParameters())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER; + hash = (53 * hash) + getDescription().hashCode(); + hash = (37 * hash) + LOCATION_URI_FIELD_NUMBER; + hash = (53 * hash) + getLocationUri().hashCode(); + if (!internalGetParameters().getMap().isEmpty()) { + hash = (37 * hash) + PARAMETERS_FIELD_NUMBER; + hash = (53 * hash) + internalGetParameters().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.cloud.biglake.hive.v1beta.HiveDatabase parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveDatabase parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveDatabase parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveDatabase parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveDatabase parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveDatabase parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveDatabase parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveDatabase parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public 
static com.google.cloud.biglake.hive.v1beta.HiveDatabase parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveDatabase parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveDatabase parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveDatabase parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.biglake.hive.v1beta.HiveDatabase prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Stores the hive database information. It includes the database name,
+   * description, location and properties associated with the database.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.HiveDatabase} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.HiveDatabase) + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetMutableParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.HiveDatabase.class, + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.HiveDatabase.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + description_ = ""; + locationUri_ = ""; + 
internalGetMutableParameters().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveDatabase getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveDatabase build() { + com.google.cloud.biglake.hive.v1beta.HiveDatabase result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveDatabase buildPartial() { + com.google.cloud.biglake.hive.v1beta.HiveDatabase result = + new com.google.cloud.biglake.hive.v1beta.HiveDatabase(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.HiveDatabase result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.description_ = description_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.locationUri_ = locationUri_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.parameters_ = internalGetParameters(); + result.parameters_.makeImmutable(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.HiveDatabase) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.HiveDatabase) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.HiveDatabase 
other) { + if (other == com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getDescription().isEmpty()) { + description_ = other.description_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getLocationUri().isEmpty()) { + locationUri_ = other.locationUri_; + bitField0_ |= 0x00000004; + onChanged(); + } + internalGetMutableParameters().mergeFrom(other.internalGetParameters()); + bitField0_ |= 0x00000008; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + description_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + locationUri_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.protobuf.MapEntry parameters__ = + input.readMessage( + ParametersDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableParameters() + .getMutableMap() + .put(parameters__.getKey(), parameters__.getValue()); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while 
(!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object description_ = ""; + + /** + * + * + *
+     * Optional. Stores the database description.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Stores the database description.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Stores the database description.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The description to set. + * @return This builder for chaining. + */ + public Builder setDescription(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + description_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Stores the database description.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDescription() { + description_ = getDefaultInstance().getDescription(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Stores the database description.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for description to set. + * @return This builder for chaining. + */ + public Builder setDescriptionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + description_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object locationUri_ = ""; + + /** + * + * + *
+     * Optional. The Cloud Storage location path where the database exists.
+     * Format: `gs://bucket/path/to/database`
+     * If unspecified, the database will be stored in the catalog location.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + public java.lang.String getLocationUri() { + java.lang.Object ref = locationUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The Cloud Storage location path where the database exists.
+     * Format: `gs://bucket/path/to/database`
+     * If unspecified, the database will be stored in the catalog location.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + public com.google.protobuf.ByteString getLocationUriBytes() { + java.lang.Object ref = locationUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The Cloud Storage location path where the database exists.
+     * Format: `gs://bucket/path/to/database`
+     * If unspecified, the database will be stored in the catalog location.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The locationUri to set. + * @return This builder for chaining. + */ + public Builder setLocationUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + locationUri_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The Cloud Storage location path where the database exists.
+     * Format: `gs://bucket/path/to/database`
+     * If unspecified, the database will be stored in the catalog location.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLocationUri() { + locationUri_ = getDefaultInstance().getLocationUri(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The Cloud Storage location path where the database exists.
+     * Format: `gs://bucket/path/to/database`
+     * If unspecified, the database will be stored in the catalog location.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for locationUri to set. + * @return This builder for chaining. + */ + public Builder setLocationUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + locationUri_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField + internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + private com.google.protobuf.MapField + internalGetMutableParameters() { + if (parameters_ == null) { + parameters_ = + com.google.protobuf.MapField.newMapField(ParametersDefaultEntryHolder.defaultEntry); + } + if (!parameters_.isMutable()) { + parameters_ = parameters_.copy(); + } + bitField0_ |= 0x00000008; + onChanged(); + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the database.
+     * The maximum size is 2 MiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the database.
+     * The maximum size is 2 MiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the database.
+     * The maximum size is 2 MiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the database.
+     * The maximum size is 2 MiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearParameters() { + bitField0_ = (bitField0_ & ~0x00000008); + internalGetMutableParameters().getMutableMap().clear(); + return this; + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the database.
+     * The maximum size is 2 MiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableParameters().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableParameters() { + bitField0_ |= 0x00000008; + return internalGetMutableParameters().getMutableMap(); + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the database.
+     * The maximum size is 2 MiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putParameters(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParameters().getMutableMap().put(key, value); + bitField0_ |= 0x00000008; + return this; + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the database.
+     * The maximum size is 2 MiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllParameters(java.util.Map values) { + internalGetMutableParameters().getMutableMap().putAll(values); + bitField0_ |= 0x00000008; + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.HiveDatabase) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.HiveDatabase) + private static final com.google.cloud.biglake.hive.v1beta.HiveDatabase DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.HiveDatabase(); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveDatabase getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public HiveDatabase parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveDatabase getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff 
--git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveDatabaseOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveDatabaseOrBuilder.java new file mode 100644 index 000000000000..955c6a5298d7 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveDatabaseOrBuilder.java @@ -0,0 +1,195 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface HiveDatabaseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.HiveDatabase) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Optional. Stores the database description.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + java.lang.String getDescription(); + + /** + * + * + *
+   * Optional. Stores the database description.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + com.google.protobuf.ByteString getDescriptionBytes(); + + /** + * + * + *
+   * Optional. The Cloud Storage location path where the database exists.
+   * Format: `gs://bucket/path/to/database`
+   * If unspecified, the database will be stored in the catalog location.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + java.lang.String getLocationUri(); + + /** + * + * + *
+   * Optional. The Cloud Storage location path where the database exists.
+   * Format: `gs://bucket/path/to/database`
+   * If unspecified, the database will be stored in the catalog location.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string location_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + com.google.protobuf.ByteString getLocationUriBytes(); + + /** + * + * + *
+   * Optional. Stores the properties associated with the database.
+   * The maximum size is 2 MiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getParametersCount(); + + /** + * + * + *
+   * Optional. Stores the properties associated with the database.
+   * The maximum size is 2 MiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsParameters(java.lang.String key); + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParameters(); + + /** + * + * + *
+   * Optional. Stores the properties associated with the database.
+   * The maximum size is 2 MiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getParametersMap(); + + /** + * + * + *
+   * Optional. Stores the properties associated with the database.
+   * The maximum size is 2 MiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
+   * Optional. Stores the properties associated with the database.
+   * The maximum size is 2 MiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getParametersOrThrow(java.lang.String key); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreProto.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreProto.java new file mode 100644 index 000000000000..6fede320842f --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveMetastoreProto.java @@ -0,0 +1,988 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public final class HiveMetastoreProto extends com.google.protobuf.GeneratedFile { + private HiveMetastoreProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "HiveMetastoreProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_Replica_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_Replica_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveCatalogRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveCatalogRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_GetHiveCatalogRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_cloud_biglake_hive_v1beta_GetHiveCatalogRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveCatalogRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveCatalogRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveCatalogRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveCatalogRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_ParametersEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_ParametersEntry_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveDatabaseRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveDatabaseRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_GetHiveDatabaseRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_GetHiveDatabaseRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveDatabaseRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveDatabaseRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveDatabaseRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveDatabaseRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_HiveTable_descriptor; + 
static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_HiveTable_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_HiveTable_ParametersEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_HiveTable_ParametersEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_FieldSchema_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_FieldSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_Order_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_Order_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedColumnValue_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedColumnValue_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedKeyValuesLocation_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedKeyValuesLocation_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_ParametersEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_ParametersEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_ParametersEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_ParametersEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveTableRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveTableRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_GetHiveTableRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_cloud_biglake_hive_v1beta_GetHiveTableRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveTableRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveTableRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveTableRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveTableRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_Partition_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_Partition_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_Partition_ParametersEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_Partition_ParametersEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_cloud_biglake_hive_v1beta_PartitionValues_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_PartitionValues_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_CreatePartitionRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_CreatePartitionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_BatchDeletePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_BatchDeletePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_UpdatePartitionRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_UpdatePartitionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsRequest_descriptor; + static final 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "5google/cloud/biglake/hive/v1beta/hive_metastore.proto\022 google.cloud.biglake.hi" + + "ve.v1beta\032\034google/api/annotations.proto\032" + + "\027google/api/client.proto\032\037google/api/fie" + + "ld_behavior.proto\032\031google/api/resource.proto\032\033google/protobuf/empty.proto\032" + + " google/protobuf/field_mask.proto\032\037google/protobuf/timestamp.proto\"\376\003\n" + + "\013HiveCatalog\0224\n" + + "\004name\030\001 \001(\tB&\340A\003\372A \n" + + "\036biglake.googleapis.com/Catalog\022\030\n" + + "\013description\030\002 \001(\tB\003\340A\001\022\031\n" + + "\014location_uri\030\003 \001(\tB\003\340A\002\022L\n" + + 
"\010replicas\030\004 \003(" + + "\01325.google.cloud.biglake.hive.v1beta.HiveCatalog.ReplicaB\003\340A\003\032\326\001\n" + + "\007Replica\022\023\n" + + "\006region\030\001 \001(\tB\003\340A\003\022O\n" + + "\005state\030\002 \001(\0162;.google.c" + + "loud.biglake.hive.v1beta.HiveCatalog.Replica.StateB\003\340A\003\"e\n" + + "\005State\022\025\n" + + "\021STATE_UNSPECIFIED\020\000\022\021\n\r" + + "STATE_PRIMARY\020\001\022\035\n" + + "\031STATE_PRIMARY_IN_PROGRESS\020\002\022\023\n" + + "\017STATE_SECONDARY\020\003:]\352AZ\n" + + "\036biglake.googleapis.com/Catalog\022%pro" + + "jects/{project}/catalogs/{catalog}*\010catalogs2\007catalog\"\370\001\n" + + "\030CreateHiveCatalogRequest\022C\n" + + "\006parent\030\001 \001(\tB3\340A\002\372A-\n" + + "+cloudresourcemanager.googleapis.com/Project\022H\n" + + "\014hive_catalog\030\002" + + " \001(\0132-.google.cloud.biglake.hive.v1beta.HiveCatalogB\003\340A\002\022\034\n" + + "\017hive_catalog_id\030\003 \001(\tB\003\340A\002\022/\n" + + "\020primary_location\030\004 \001(\tB\003\340A\002R\020primary_location\"M\n" + + "\025GetHiveCatalogRequest\0224\n" + + "\004name\030\001 \001(\tB&\340A\002\372A \n" + + "\036biglake.googleapis.com/Catalog\"\217\001\n" + + "\027ListHiveCatalogsRequest\022C\n" + + "\006parent\030\001 \001(\tB3\340A\002\372A-\n" + + "+cloudresourcemanager.googleapis.com/Project\022\026\n" + + "\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\n" + + "page_token\030\003 \001(\tB\003\340A\001\"\230\001\n" + + "\030ListHiveCatalogsResponse\022D\n" + + "\010catalogs\030\001" + + " \003(\0132-.google.cloud.biglake.hive.v1beta.HiveCatalogB\003\340A\003\022\034\n" + + "\017next_page_token\030\002 \001(\tB\003\340A\003\022\030\n" + + "\013unreachable\030\003 \003(\tB\003\340A\003\"\232\001\n" + + "\030UpdateHiveCatalogRequest\022H\n" + + "\014hive_catalog\030\001" + + " \001(\0132-.google.cloud.biglake.hive.v1beta.HiveCatalogB\003\340A\002\0224\n" + + "\013update_mask\030\002 
\001(\0132\032.google.protobuf.FieldMaskB\003\340A\001\"P\n" + + "\030DeleteHiveCatalogRequest\0224\n" + + "\004name\030\001 \001(\tB&\340A\002\372A \n" + + "\036biglake.googleapis.com/Catalog\"\201\003\n" + + "\014HiveDatabase\0226\n" + + "\004name\030\001 \001(\tB(\340A\003\372A\"\n" + + " biglake.googleapis.com/Namespace\022\030\n" + + "\013description\030\002 \001(\tB\003\340A\001\022\031\n" + + "\014location_uri\030\003 \001(\tB\003\340A\001\022W\n\n" + + "parameters\030\004 \003(\0132>.google.c" + + "loud.biglake.hive.v1beta.HiveDatabase.ParametersEntryB\003\340A\001\0321\n" + + "\017ParametersEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001:x\352Au\n" + + " biglake.googleapis.com/Namespace\022:projects/{" + + "project}/catalogs/{catalog}/databases/{database}*\n" + + "namespaces2\tnamespace\"\276\001\n" + + "\031CreateHiveDatabaseRequest\0226\n" + + "\006parent\030\001 \001(\tB&\340A\002\372A \n" + + "\036biglake.googleapis.com/Catalog\022J\n" + + "\r" + + "hive_database\030\002" + + " \001(\0132..google.cloud.biglake.hive.v1beta.HiveDatabaseB\003\340A\002\022\035\n" + + "\020hive_database_id\030\003 \001(\tB\003\340A\002\"P\n" + + "\026GetHiveDatabaseRequest\0226\n" + + "\004name\030\001 \001(\tB(\340A\002\372A\"\n" + + " biglake.googleapis.com/Namespace\"\203\001\n" + + "\030ListHiveDatabasesRequest\0226\n" + + "\006parent\030\001 \001(\tB&\340A\002\372A \n" + + "\036biglake.googleapis.com/Catalog\022\026\n" + + "\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\n" + + "page_token\030\003 \001(\tB\003\340A\001\"\201\001\n" + + "\031ListHiveDatabasesResponse\022F\n" + + "\tdatabases\030\001" + + " \003(\0132..google.cloud.biglake.hive.v1beta.HiveDatabaseB\003\340A\003\022\034\n" + + "\017next_page_token\030\002 \001(\tB\003\340A\003\"\235\001\n" + + "\031UpdateHiveDatabaseRequest\022J\n\r" + + "hive_database\030\001 \001(\0132..google.cloud" + + ".biglake.hive.v1beta.HiveDatabaseB\003\340A\002\0224\n" + + 
"\013update_mask\030\002 \001(\0132\032.google.protobuf.FieldMaskB\003\340A\001\"S\n" + + "\031DeleteHiveDatabaseRequest\0226\n" + + "\004name\030\001 \001(\tB(\340A\002\372A\"\n" + + " biglake.googleapis.com/Namespace\"\320\004\n" + + "\tHiveTable\0222\n" + + "\004name\030\001 \001(\tB$\340A\003\372A\036\n" + + "\034biglake.googleapis.com/Table\022\030\n" + + "\013description\030\002 \001(\tB\003\340A\001\022T\n" + + "\022storage_descriptor\030\003" + + " \001(\01323.google.cloud.biglake.hive.v1beta.StorageDescriptorB\003\340A\002\0224\n" + + "\013create_time\030\004 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022J\n" + + "\016partition_keys\030\007 \003(\0132-.goog" + + "le.cloud.biglake.hive.v1beta.FieldSchemaB\003\340A\001\022T\n\n" + + "parameters\030\010 \003(\0132;.google.cloud" + + ".biglake.hive.v1beta.HiveTable.ParametersEntryB\003\340A\001\022\027\n\n" + + "table_type\030\013 \001(\tB\003\340A\003\0321\n" + + "\017ParametersEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001:{\352Ax\n" + + "\034biglake.googleapis.com/Table\022Iprojects/{project}/catalogs/{catalog" + + "}/databases/{database}/tables/{table}*\006tables2\005table\"I\n" + + "\013FieldSchema\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\002\022\021\n" + + "\004type\030\002 \001(\tB\003\340A\002\022\024\n" + + "\007comment\030\003 \001(\tB\003\340A\001\"\244\t\n" + + "\021StorageDescriptor\022C\n" + + "\007columns\030\001" + + " \003(\0132-.google.cloud.biglake.hive.v1beta.FieldSchemaB\003\340A\002\022\031\n" + + "\014location_uri\030\002 \001(\tB\003\340A\001\022\031\n" + + "\014input_format\030\003 \001(\tB\003\340A\001\022\032\n\r" + + "output_format\030\004 \001(\tB\003\340A\001\022\034\n\n" + + "compressed\030\005 \001(\010B\003\340A\001H\000\210\001\001\022\035\n" + + "\013num_buckets\030\006 \001(\005B\003\340A\001H\001\210\001\001\022D\n\n" + + "serde_info\030\007" + + " 
\001(\0132+.google.cloud.biglake.hive.v1beta.SerdeInfoB\003\340A\001\022\030\n" + + "\013bucket_cols\030\010 \003(\tB\003\340A\001\022Q\n" + + "\tsort_cols\030\t \003(\01329.g" + + "oogle.cloud.biglake.hive.v1beta.StorageDescriptor.OrderB\003\340A\001\022\\\n\n" + + "parameters\030\n" + + " \003(\013" + + "2C.google.cloud.biglake.hive.v1beta.StorageDescriptor.ParametersEntryB\003\340A\001\022X\n" + + "\013skewed_info\030\013 \001(\0132>.google.cloud.biglake.h" + + "ive.v1beta.StorageDescriptor.SkewedInfoB\003\340A\001\022$\n" + + "\022stored_as_sub_dirs\030\014 \001(\010B\003\340A\001H\002\210\001\001\032-\n" + + "\005Order\022\020\n" + + "\003col\030\001 \001(\tB\003\340A\002\022\022\n" + + "\005order\030\002 \001(\005B\003\340A\002\032\221\003\n\n" + + "SkewedInfo\022\035\n" + + "\020skewed_col_names\030\001 \003(\tB\003\340A\002\022p\n" + + "\021skewed_col_values\030\002 \003(\0132P.google.cloud.biglake.hive.v1beta.St" + + "orageDescriptor.SkewedInfo.SkewedColumnValueB\003\340A\002\022\200\001\n" + + "\033skewed_key_values_locations\030\003 \003(\0132V.google.cloud.biglake.hive.v1be" + + "ta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocationB\003\340A\002\032(\n" + + "\021SkewedColumnValue\022\023\n" + + "\006values\030\001 \003(\tB\003\340A\002\032E\n" + + "\027SkewedKeyValuesLocation\022\023\n" + + "\006values\030\001 \003(\tB\003\340A\002\022\025\n" + + "\010location\030\002 \001(\tB\003\340A\002\0321\n" + + "\017ParametersEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001B\r\n" + + "\013_compressedB\016\n" + + "\014_num_bucketsB\025\n" + + "\023_stored_as_sub_dirs\"\271\003\n" + + "\tSerdeInfo\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\002\022\036\n" + + "\021serialization_lib\030\002 \001(\tB\003\340A\002\022\030\n" + + "\013description\030\003 \001(\tB\003\340A\001\022T\n\n" + + "parameters\030\004 \003(\0132;.google.c" + + "loud.biglake.hive.v1beta.SerdeInfo.ParametersEntryB\003\340A\001\022\035\n" + 
+ "\020serializer_class\030\005 \001(\tB\003\340A\001\022\037\n" + + "\022deserializer_class\030\006 \001(\tB\003\340A\001\022N\n\n" + + "serde_type\030\007 \001(\01625.google.cloud.bigla" + + "ke.hive.v1beta.SerdeInfo.SerdeTypeB\003\340A\001\0321\n" + + "\017ParametersEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001\"F\n" + + "\tSerdeType\022\032\n" + + "\026SERDE_TYPE_UNSPECIFIED\020\000\022\010\n" + + "\004HIVE\020\001\022\023\n" + + "\017SCHEMA_REGISTRY\020\002\"\264\001\n" + + "\026CreateHiveTableRequest\0228\n" + + "\006parent\030\001 \001(\tB(\340A\002\372A\"\n" + + " biglake.googleapis.com/Namespace\022D\n\n" + + "hive_table\030\002" + + " \001(\0132+.google.cloud.biglake.hive.v1beta.HiveTableB\003\340A\002\022\032\n" + + "\r" + + "hive_table_id\030\003 \001(\tB\003\340A\002\"I\n" + + "\023GetHiveTableRequest\0222\n" + + "\004name\030\001 \001(\tB$\340A\002\372A\036\n" + + "\034biglake.googleapis.com/Table\"\202\001\n" + + "\025ListHiveTablesRequest\0228\n" + + "\006parent\030\001 \001(\tB(\340A\002\372A\"\n" + + " biglake.googleapis.com/Namespace\022\026\n" + + "\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\n" + + "page_token\030\003 \001(\tB\003\340A\001\"x\n" + + "\026ListHiveTablesResponse\022@\n" + + "\006tables\030\001 \003(\0132+.go" + + "ogle.cloud.biglake.hive.v1beta.HiveTableB\003\340A\003\022\034\n" + + "\017next_page_token\030\002 \001(\tB\003\340A\003\"\224\001\n" + + "\026UpdateHiveTableRequest\022D\n\n" + + "hive_table\030\001 \001" + + "(\0132+.google.cloud.biglake.hive.v1beta.HiveTableB\003\340A\002\0224\n" + + "\013update_mask\030\002 \001(\0132\032.google.protobuf.FieldMaskB\003\340A\001\"L\n" + + "\026DeleteHiveTableRequest\0222\n" + + "\004name\030\001 \001(\tB$\340A\002\372A\036\n" + + "\034biglake.googleapis.com/Table\"\371\002\n" + + "\tPartition\022\023\n" + + "\006values\030\001 \003(\tB\003\340A\002\0224\n" + + "\013create_time\030\002 
\001(\0132\032.google.protobuf.TimestampB\003\340A\003\022T\n" + + "\022storage_descriptor\030\003 \001(\01323.google.cloud.big" + + "lake.hive.v1beta.StorageDescriptorB\003\340A\001\022T\n\n" + + "parameters\030\004 \003(\0132;.google.cloud.bigla" + + "ke.hive.v1beta.Partition.ParametersEntryB\003\340A\001\022B\n" + + "\006fields\030\005" + + " \003(\0132-.google.cloud.biglake.hive.v1beta.FieldSchemaB\003\340A\001\0321\n" + + "\017ParametersEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001\"&\n" + + "\017PartitionValues\022\023\n" + + "\006values\030\001 \003(\tB\003\340A\002\"\223\001\n" + + "\026CreatePartitionRequest\0224\n" + + "\006parent\030\001 \001(\tB$\340A\002\372A\036\n" + + "\034biglake.googleapis.com/Table\022C\n" + + "\tpartition\030\002" + + " \001(\0132+.google.cloud.biglake.hive.v1beta.PartitionB\003\340A\002\"\314\001\n" + + "\034BatchCreatePartitionsRequest\0224\n" + + "\006parent\030\001 \001(\tB$\340A\002\372A\036\n" + + "\034biglake.googleapis.com/Table\022O\n" + + "\010requests\030\002 \003(\01328.google.cloud.bigla" + + "ke.hive.v1beta.CreatePartitionRequestB\003\340A\002\022%\n" + + "\030skip_existing_partitions\030\003 \001(\010B\003\340A\001\"`\n" + + "\035BatchCreatePartitionsResponse\022?\n\n" + + "partitions\030\001 \003(\0132+.google.cloud.biglake.hive.v1beta.Partition\"\246\001\n" + + "\034BatchDeletePartitionsRequest\0224\n" + + "\006parent\030\001 \001(\tB$\340A\002\372A\036\n" + + "\034biglake.googleapis.com/Table\022P\n" + + "\020partition_values\030\002" + + " \003(\01321.google.cloud.biglake.hive.v1beta.PartitionValuesB\003\340A\002\"\223\001\n" + + "\026UpdatePartitionRequest\022C\n" + + "\tpartition\030\001 \001(\0132+.goo" + + "gle.cloud.biglake.hive.v1beta.PartitionB\003\340A\002\0224\n" + + "\013update_mask\030\002" + + " \001(\0132\032.google.protobuf.FieldMaskB\003\340A\001\"\245\001\n" + + "\034BatchUpdatePartitionsRequest\0224\n" + + "\006parent\030\001 
\001(\tB$\340A\002\372A\036\n" + + "\034biglake.googleapis.com/Table\022O\n" + + "\010requests\030\002 " + + "\003(\01328.google.cloud.biglake.hive.v1beta.UpdatePartitionRequestB\003\340A\002\"`\n" + + "\035BatchUpdatePartitionsResponse\022?\n\n" + + "partitions\030\001 \003(\0132+.google.cloud.biglake.hive.v1beta.Partition\"b\n" + + "\025ListPartitionsRequest\0224\n" + + "\006parent\030\001 \001(\tB$\340A\002\372A\036\n" + + "\034biglake.googleapis.com/Table\022\023\n" + + "\006filter\030\002 \001(\tB\003\340A\001\"^\n" + + "\026ListPartitionsResponse\022D\n\n" + + "partitions\030\001 \003(\0132+.google." + + "cloud.biglake.hive.v1beta.PartitionB\003\340A\0032\317!\n" + + "\024HiveMetastoreService\022\345\001\n" + + "\021CreateHiveCatalog\022:.google.cloud.biglake.hive.v1be" + + "ta.CreateHiveCatalogRequest\032-.google.clo" + + "ud.biglake.hive.v1beta.HiveCatalog\"e\332A#p" + + "arent,hive_catalog,hive_catalog_id\202\323\344\223\0029" + + "\")/hive/v1beta/{parent=projects/*}/catalogs:\014hive_catalog\022\262\001\n" + + "\016GetHiveCatalog\0227.google.cloud.biglake.hive.v1beta.GetHiveC" + + "atalogRequest\032-.google.cloud.biglake.hiv" + + "e.v1beta.HiveCatalog\"8\332A\004name\202\323\344\223\002+\022)/hi" + + "ve/v1beta/{name=projects/*/catalogs/*}\022\305\001\n" + + "\020ListHiveCatalogs\0229.google.cloud.bigla" + + "ke.hive.v1beta.ListHiveCatalogsRequest\032:.google.cloud.biglake.hive.v1beta.ListHi" + + "veCatalogsResponse\":\332A\006parent\202\323\344\223\002+\022)/hi" + + "ve/v1beta/{parent=projects/*}/catalogs\022\347\001\n" + + "\021UpdateHiveCatalog\022:.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest" + + "\032-.google.cloud.biglake.hive.v1beta.Hive" + + "Catalog\"g\332A\030hive_catalog,update_mask\202\323\344\223" + + "\002F26/hive/v1beta/{hive_catalog.name=projects/*/catalogs/*}:\014hive_catalog\022\241\001\n" + + "\021DeleteHiveCatalog\022:.google.cloud.biglake.hi" + + "ve.v1beta.DeleteHiveCatalogRequest\032\026.goo" + + 
"gle.protobuf.Empty\"8\332A\004name\202\323\344\223\002+*)/hive/v1beta/{name=projects/*/catalogs/*}\022\367\001\n" + + "\022CreateHiveDatabase\022;.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest" + + "\032..google.cloud.biglake.hive.v1beta.Hive" + + "Database\"t\332A%parent,hive_database,hive_d" + + "atabase_id\202\323\344\223\002F\"5/hive/v1beta/{parent=projects/*/catalogs/*}/databases:\r" + + "hive_database\022\301\001\n" + + "\017GetHiveDatabase\0228.google.cloud.biglake.hive.v1beta.GetHiveDatabaseReq" + + "uest\032..google.cloud.biglake.hive.v1beta." + + "HiveDatabase\"D\332A\004name\202\323\344\223\0027\0225/hive/v1bet" + + "a/{name=projects/*/catalogs/*/databases/*}\022\324\001\n" + + "\021ListHiveDatabases\022:.google.cloud.biglake.hive.v1beta.ListHiveDatabasesReq" + + "uest\032;.google.cloud.biglake.hive.v1beta." + + "ListHiveDatabasesResponse\"F\332A\006parent\202\323\344\223" + + "\0027\0225/hive/v1beta/{parent=projects/*/catalogs/*}/databases\022\371\001\n" + + "\022UpdateHiveDatabase\022;.google.cloud.biglake.hive.v1beta.Upda" + + "teHiveDatabaseRequest\032..google.cloud.big" + + "lake.hive.v1beta.HiveDatabase\"v\332A\031hive_d" + + "atabase,update_mask\202\323\344\223\002T2C/hive/v1beta/" + + "{hive_database.name=projects/*/catalogs/*/databases/*}:\r" + + "hive_database\022\257\001\n" + + "\022DeleteHiveDatabase\022;.google.cloud.biglake.hive" + + ".v1beta.DeleteHiveDatabaseRequest\032\026.goog" + + "le.protobuf.Empty\"D\332A\004name\202\323\344\223\0027*5/hive/" + + "v1beta/{name=projects/*/catalogs/*/databases/*}\022\356\001\n" + + "\017CreateHiveTable\0228.google.cloud.biglake.hive.v1beta.CreateHiveTableRe" + + "quest\032+.google.cloud.biglake.hive.v1beta" + + ".HiveTable\"t\332A\037parent,hive_table,hive_ta" + + "ble_id\202\323\344\223\002L\">/hive/v1beta/{parent=projects/*/catalogs/*/databases/*}/tables:\n" + + "hive_table\022\301\001\n" + + "\014GetHiveTable\0225.google.cloud.biglake.hive.v1beta.GetHiveTableRequest" + + 
"\032+.google.cloud.biglake.hive.v1beta.Hive" + + "Table\"M\332A\004name\202\323\344\223\002@\022>/hive/v1beta/{name" + + "=projects/*/catalogs/*/databases/*/tables/*}\022\324\001\n" + + "\016ListHiveTables\0227.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest" + + "\0328.google.cloud.biglake.hive.v1beta.List" + + "HiveTablesResponse\"O\332A\006parent\202\323\344\223\002@\022>/hi" + + "ve/v1beta/{parent=projects/*/catalogs/*/databases/*}/tables\022\360\001\n" + + "\017UpdateHiveTable\0228.google.cloud.biglake.hive.v1beta.Updat" + + "eHiveTableRequest\032+.google.cloud.biglake" + + ".hive.v1beta.HiveTable\"v\332A\026hive_table,up" + + "date_mask\202\323\344\223\002W2I/hive/v1beta/{hive_tabl" + + "e.name=projects/*/catalogs/*/databases/*/tables/*}:\n" + + "hive_table\022\262\001\n" + + "\017DeleteHiveTable\0228.google.cloud.biglake.hive.v1beta.De" + + "leteHiveTableRequest\032\026.google.protobuf.E" + + "mpty\"M\332A\004name\202\323\344\223\002@*>/hive/v1beta/{name=" + + "projects/*/catalogs/*/databases/*/tables/*}\022\205\002\n" + + "\025BatchCreatePartitions\022>.google.cloud.biglake.hive.v1beta.BatchCreatePart" + + "itionsRequest\032?.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse\"k" + + "\332A\006parent\202\323\344\223\002\\\"W/hive/v1beta/{parent=pr" + + "ojects/*/catalogs/*/databases/*/tables/*}/partitions:batchCreate:\001*\022\334\001\n" + + "\025BatchDeletePartitions\022>.google.cloud.biglake.hiv" + + "e.v1beta.BatchDeletePartitionsRequest\032\026." 
+ + "google.protobuf.Empty\"k\332A\006parent\202\323\344\223\002\\\"W" + + "/hive/v1beta/{parent=projects/*/catalogs" + + "/*/databases/*/tables/*}/partitions:batchDelete:\001*\022\205\002\n" + + "\025BatchUpdatePartitions\022>.google.cloud.biglake.hive.v1beta.BatchUpd" + + "atePartitionsRequest\032?.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRes" + + "ponse\"k\332A\006parent\202\323\344\223\002\\\"W/hive/v1beta/{pa" + + "rent=projects/*/catalogs/*/databases/*/tables/*}/partitions:batchUpdate:\001*\022\350\001\n" + + "\016ListPartitions\0227.google.cloud.biglake.hiv" + + "e.v1beta.ListPartitionsRequest\0328.google.cloud.biglake.hive.v1beta.ListPartitions" + + "Response\"a\332A\006parent\202\323\344\223\002R\022P/hive/v1beta/" + + "{parent=projects/*/catalogs/*/databases/" + + "*/tables/*}/partitions:list0\001\032s\312A\026biglak" + + "e.googleapis.com\322AWhttps://www.googleapi" + + "s.com/auth/bigquery,https://www.googleapis.com/auth/cloud-platformBv\n" + + "$com.google.cloud.biglake.hive.v1betaB\022HiveMetastor" + + "eProtoP\001Z8cloud.google.com/go/biglake/hive/apiv1beta/hivepb;hivepbb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_descriptor, + new java.lang.String[] { + "Name", "Description", "LocationUri", "Replicas", + }); + internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_Replica_descriptor = + internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_descriptor.getNestedType(0); + internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_Replica_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_HiveCatalog_Replica_descriptor, + new java.lang.String[] { + "Region", "State", + }); + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveCatalogRequest_descriptor = + getDescriptor().getMessageType(1); + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveCatalogRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveCatalogRequest_descriptor, + new java.lang.String[] { + "Parent", "HiveCatalog", "HiveCatalogId", "PrimaryLocation", + }); + internal_static_google_cloud_biglake_hive_v1beta_GetHiveCatalogRequest_descriptor = + getDescriptor().getMessageType(2); + internal_static_google_cloud_biglake_hive_v1beta_GetHiveCatalogRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_GetHiveCatalogRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsRequest_descriptor = + getDescriptor().getMessageType(3); + internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", + }); + 
internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsResponse_descriptor = + getDescriptor().getMessageType(4); + internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsResponse_descriptor, + new java.lang.String[] { + "Catalogs", "NextPageToken", "Unreachable", + }); + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveCatalogRequest_descriptor = + getDescriptor().getMessageType(5); + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveCatalogRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveCatalogRequest_descriptor, + new java.lang.String[] { + "HiveCatalog", "UpdateMask", + }); + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveCatalogRequest_descriptor = + getDescriptor().getMessageType(6); + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveCatalogRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveCatalogRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_descriptor = + getDescriptor().getMessageType(7); + internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_descriptor, + new java.lang.String[] { + "Name", "Description", "LocationUri", "Parameters", + }); + internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_ParametersEntry_descriptor = + internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_descriptor.getNestedType(0); + 
internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_ParametersEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_HiveDatabase_ParametersEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveDatabaseRequest_descriptor = + getDescriptor().getMessageType(8); + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveDatabaseRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveDatabaseRequest_descriptor, + new java.lang.String[] { + "Parent", "HiveDatabase", "HiveDatabaseId", + }); + internal_static_google_cloud_biglake_hive_v1beta_GetHiveDatabaseRequest_descriptor = + getDescriptor().getMessageType(9); + internal_static_google_cloud_biglake_hive_v1beta_GetHiveDatabaseRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_GetHiveDatabaseRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesRequest_descriptor = + getDescriptor().getMessageType(10); + internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", + }); + internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesResponse_descriptor = + getDescriptor().getMessageType(11); + internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesResponse_descriptor, + 
new java.lang.String[] { + "Databases", "NextPageToken", + }); + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveDatabaseRequest_descriptor = + getDescriptor().getMessageType(12); + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveDatabaseRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveDatabaseRequest_descriptor, + new java.lang.String[] { + "HiveDatabase", "UpdateMask", + }); + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveDatabaseRequest_descriptor = + getDescriptor().getMessageType(13); + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveDatabaseRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveDatabaseRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_biglake_hive_v1beta_HiveTable_descriptor = + getDescriptor().getMessageType(14); + internal_static_google_cloud_biglake_hive_v1beta_HiveTable_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_HiveTable_descriptor, + new java.lang.String[] { + "Name", + "Description", + "StorageDescriptor", + "CreateTime", + "PartitionKeys", + "Parameters", + "TableType", + }); + internal_static_google_cloud_biglake_hive_v1beta_HiveTable_ParametersEntry_descriptor = + internal_static_google_cloud_biglake_hive_v1beta_HiveTable_descriptor.getNestedType(0); + internal_static_google_cloud_biglake_hive_v1beta_HiveTable_ParametersEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_HiveTable_ParametersEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_biglake_hive_v1beta_FieldSchema_descriptor = + 
getDescriptor().getMessageType(15); + internal_static_google_cloud_biglake_hive_v1beta_FieldSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_FieldSchema_descriptor, + new java.lang.String[] { + "Name", "Type", "Comment", + }); + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_descriptor = + getDescriptor().getMessageType(16); + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_descriptor, + new java.lang.String[] { + "Columns", + "LocationUri", + "InputFormat", + "OutputFormat", + "Compressed", + "NumBuckets", + "SerdeInfo", + "BucketCols", + "SortCols", + "Parameters", + "SkewedInfo", + "StoredAsSubDirs", + }); + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_Order_descriptor = + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_descriptor.getNestedType( + 0); + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_Order_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_Order_descriptor, + new java.lang.String[] { + "Col", "Order", + }); + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_descriptor = + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_descriptor.getNestedType( + 1); + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_descriptor, + new java.lang.String[] { + "SkewedColNames", "SkewedColValues", "SkewedKeyValuesLocations", + }); + 
internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedColumnValue_descriptor = + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_descriptor + .getNestedType(0); + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedColumnValue_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedColumnValue_descriptor, + new java.lang.String[] { + "Values", + }); + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedKeyValuesLocation_descriptor = + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_descriptor + .getNestedType(1); + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedKeyValuesLocation_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedKeyValuesLocation_descriptor, + new java.lang.String[] { + "Values", "Location", + }); + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_ParametersEntry_descriptor = + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_descriptor.getNestedType( + 2); + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_ParametersEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_ParametersEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_descriptor = + getDescriptor().getMessageType(17); + internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_descriptor, + new 
java.lang.String[] { + "Name", + "SerializationLib", + "Description", + "Parameters", + "SerializerClass", + "DeserializerClass", + "SerdeType", + }); + internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_ParametersEntry_descriptor = + internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_descriptor.getNestedType(0); + internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_ParametersEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_ParametersEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveTableRequest_descriptor = + getDescriptor().getMessageType(18); + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveTableRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_CreateHiveTableRequest_descriptor, + new java.lang.String[] { + "Parent", "HiveTable", "HiveTableId", + }); + internal_static_google_cloud_biglake_hive_v1beta_GetHiveTableRequest_descriptor = + getDescriptor().getMessageType(19); + internal_static_google_cloud_biglake_hive_v1beta_GetHiveTableRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_GetHiveTableRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesRequest_descriptor = + getDescriptor().getMessageType(20); + internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", + }); + internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesResponse_descriptor = 
+ getDescriptor().getMessageType(21); + internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesResponse_descriptor, + new java.lang.String[] { + "Tables", "NextPageToken", + }); + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveTableRequest_descriptor = + getDescriptor().getMessageType(22); + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveTableRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveTableRequest_descriptor, + new java.lang.String[] { + "HiveTable", "UpdateMask", + }); + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveTableRequest_descriptor = + getDescriptor().getMessageType(23); + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveTableRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_DeleteHiveTableRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_biglake_hive_v1beta_Partition_descriptor = + getDescriptor().getMessageType(24); + internal_static_google_cloud_biglake_hive_v1beta_Partition_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_Partition_descriptor, + new java.lang.String[] { + "Values", "CreateTime", "StorageDescriptor", "Parameters", "Fields", + }); + internal_static_google_cloud_biglake_hive_v1beta_Partition_ParametersEntry_descriptor = + internal_static_google_cloud_biglake_hive_v1beta_Partition_descriptor.getNestedType(0); + internal_static_google_cloud_biglake_hive_v1beta_Partition_ParametersEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_cloud_biglake_hive_v1beta_Partition_ParametersEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_biglake_hive_v1beta_PartitionValues_descriptor = + getDescriptor().getMessageType(25); + internal_static_google_cloud_biglake_hive_v1beta_PartitionValues_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_PartitionValues_descriptor, + new java.lang.String[] { + "Values", + }); + internal_static_google_cloud_biglake_hive_v1beta_CreatePartitionRequest_descriptor = + getDescriptor().getMessageType(26); + internal_static_google_cloud_biglake_hive_v1beta_CreatePartitionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_CreatePartitionRequest_descriptor, + new java.lang.String[] { + "Parent", "Partition", + }); + internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsRequest_descriptor = + getDescriptor().getMessageType(27); + internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "Requests", "SkipExistingPartitions", + }); + internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsResponse_descriptor = + getDescriptor().getMessageType(28); + internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_BatchCreatePartitionsResponse_descriptor, + new java.lang.String[] { + "Partitions", + }); + internal_static_google_cloud_biglake_hive_v1beta_BatchDeletePartitionsRequest_descriptor = + getDescriptor().getMessageType(29); + 
internal_static_google_cloud_biglake_hive_v1beta_BatchDeletePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_BatchDeletePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "PartitionValues", + }); + internal_static_google_cloud_biglake_hive_v1beta_UpdatePartitionRequest_descriptor = + getDescriptor().getMessageType(30); + internal_static_google_cloud_biglake_hive_v1beta_UpdatePartitionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_UpdatePartitionRequest_descriptor, + new java.lang.String[] { + "Partition", "UpdateMask", + }); + internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsRequest_descriptor = + getDescriptor().getMessageType(31); + internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "Requests", + }); + internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsResponse_descriptor = + getDescriptor().getMessageType(32); + internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_BatchUpdatePartitionsResponse_descriptor, + new java.lang.String[] { + "Partitions", + }); + internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsRequest_descriptor = + getDescriptor().getMessageType(33); + internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsRequest_descriptor, + 
new java.lang.String[] { + "Parent", "Filter", + }); + internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsResponse_descriptor = + getDescriptor().getMessageType(34); + internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsResponse_descriptor, + new java.lang.String[] { + "Partitions", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveTable.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveTable.java new file mode 100644 index 000000000000..7ca72d8b12ed --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveTable.java @@ -0,0 +1,2584 @@ +/* 
+ * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Stores the hive table information. It includes the table name, schema (column
+ * names and types), data location, storage format, serde info, etc. This
+ * message closely matches the Table object in the IMetastoreClient
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.HiveTable} + */ +@com.google.protobuf.Generated +public final class HiveTable extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.HiveTable) + HiveTableOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "HiveTable"); + } + + // Use HiveTable.newBuilder() to construct. + private HiveTable(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private HiveTable() { + name_ = ""; + description_ = ""; + partitionKeys_ = java.util.Collections.emptyList(); + tableType_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveTable_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 8: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveTable_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.HiveTable.class, + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile 
java.lang.Object name_ = ""; + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESCRIPTION_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object description_ = ""; + + /** + * + * + *
+   * Optional. Description of the table. The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + @java.lang.Override + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Description of the table. The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STORAGE_DESCRIPTOR_FIELD_NUMBER = 3; + private com.google.cloud.biglake.hive.v1beta.StorageDescriptor storageDescriptor_; + + /** + * + * + *
+   * Required. Storage descriptor of the table.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the storageDescriptor field is set. + */ + @java.lang.Override + public boolean hasStorageDescriptor() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. Storage descriptor of the table.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The storageDescriptor. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor getStorageDescriptor() { + return storageDescriptor_ == null + ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } + + /** + * + * + *
+   * Required. Storage descriptor of the table.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder + getStorageDescriptorOrBuilder() { + return storageDescriptor_ == null + ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. The creation time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Output only. The creation time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. The creation time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int PARTITION_KEYS_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private java.util.List partitionKeys_; + + /** + * + * + *
+   * Optional. The partition keys of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List getPartitionKeysList() { + return partitionKeys_; + } + + /** + * + * + *
+   * Optional. The partition keys of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getPartitionKeysOrBuilderList() { + return partitionKeys_; + } + + /** + * + * + *
+   * Optional. The partition keys of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getPartitionKeysCount() { + return partitionKeys_.size(); + } + + /** + * + * + *
+   * Optional. The partition keys of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.FieldSchema getPartitionKeys(int index) { + return partitionKeys_.get(index); + } + + /** + * + * + *
+   * Optional. The partition keys of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder getPartitionKeysOrBuilder( + int index) { + return partitionKeys_.get(index); + } + + public static final int PARAMETERS_FIELD_NUMBER = 8; + + private static final class ParametersDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveTable_ParametersEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField(ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+   * Optional. Stores the properties associated with the table. The maximum size
+   * is 4MiB.
+   * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+   * Optional. Stores the properties associated with the table. The maximum size
+   * is 4MiB.
+   * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+   * Optional. Stores the properties associated with the table. The maximum size
+   * is 4MiB.
+   * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * Optional. Stores the properties associated with the table. The maximum size
+   * is 4MiB.
+   * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int TABLE_TYPE_FIELD_NUMBER = 11; + + @SuppressWarnings("serial") + private volatile java.lang.Object tableType_ = ""; + + /** + * + * + *
+   * Output only. The type of the table. This is EXTERNAL for BigLake hive
+   * tables.
+   * 
+ * + * string table_type = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The tableType. + */ + @java.lang.Override + public java.lang.String getTableType() { + java.lang.Object ref = tableType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + tableType_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The type of the table. This is EXTERNAL for BigLake hive
+   * tables.
+   * 
+ * + * string table_type = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for tableType. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableTypeBytes() { + java.lang.Object ref = tableType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + tableType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(description_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, description_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getStorageDescriptor()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getCreateTime()); + } + for (int i = 0; i < partitionKeys_.size(); i++) { + output.writeMessage(7, partitionKeys_.get(i)); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetParameters(), ParametersDefaultEntryHolder.defaultEntry, 8); + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(tableType_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 11, tableType_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(description_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, description_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getStorageDescriptor()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCreateTime()); + } + for (int i = 0; i < partitionKeys_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, partitionKeys_.get(i)); + } + for (java.util.Map.Entry entry : + internalGetParameters().getMap().entrySet()) { + com.google.protobuf.MapEntry parameters__ = + ParametersDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, parameters__); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(tableType_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(11, tableType_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.HiveTable)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.HiveTable other = + (com.google.cloud.biglake.hive.v1beta.HiveTable) obj; + + if (!getName().equals(other.getName())) return false; + if (!getDescription().equals(other.getDescription())) return false; + if (hasStorageDescriptor() != other.hasStorageDescriptor()) return false; + if (hasStorageDescriptor()) { + if (!getStorageDescriptor().equals(other.getStorageDescriptor())) return false; + } + if 
(hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (!getPartitionKeysList().equals(other.getPartitionKeysList())) return false; + if (!internalGetParameters().equals(other.internalGetParameters())) return false; + if (!getTableType().equals(other.getTableType())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER; + hash = (53 * hash) + getDescription().hashCode(); + if (hasStorageDescriptor()) { + hash = (37 * hash) + STORAGE_DESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getStorageDescriptor().hashCode(); + } + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (getPartitionKeysCount() > 0) { + hash = (37 * hash) + PARTITION_KEYS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionKeysList().hashCode(); + } + if (!internalGetParameters().getMap().isEmpty()) { + hash = (37 * hash) + PARAMETERS_FIELD_NUMBER; + hash = (53 * hash) + internalGetParameters().hashCode(); + } + hash = (37 * hash) + TABLE_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getTableType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.biglake.hive.v1beta.HiveTable prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Stores the hive table information. It includes the table name, schema (column
+   * names and types), data location, storage format, serde info, etc. This
+   * message closely matches the Table object in the IMetastoreClient
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.HiveTable} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.HiveTable) + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveTable_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 8: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 8: + return internalGetMutableParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveTable_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.HiveTable.class, + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.HiveTable.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetStorageDescriptorFieldBuilder(); + internalGetCreateTimeFieldBuilder(); + internalGetPartitionKeysFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + description_ = ""; + storageDescriptor_ = null; + if (storageDescriptorBuilder_ != null) { + storageDescriptorBuilder_.dispose(); + storageDescriptorBuilder_ = null; + } + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + if (partitionKeysBuilder_ == null) { + partitionKeys_ = java.util.Collections.emptyList(); + } else { + partitionKeys_ = null; + partitionKeysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + internalGetMutableParameters().clear(); + tableType_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_HiveTable_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveTable getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveTable build() { + com.google.cloud.biglake.hive.v1beta.HiveTable result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveTable buildPartial() { + com.google.cloud.biglake.hive.v1beta.HiveTable result = + new com.google.cloud.biglake.hive.v1beta.HiveTable(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void 
buildPartialRepeatedFields(com.google.cloud.biglake.hive.v1beta.HiveTable result) { + if (partitionKeysBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0)) { + partitionKeys_ = java.util.Collections.unmodifiableList(partitionKeys_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.partitionKeys_ = partitionKeys_; + } else { + result.partitionKeys_ = partitionKeysBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.HiveTable result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.description_ = description_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.storageDescriptor_ = + storageDescriptorBuilder_ == null + ? storageDescriptor_ + : storageDescriptorBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? 
createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.parameters_ = internalGetParameters(); + result.parameters_.makeImmutable(); + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.tableType_ = tableType_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.HiveTable) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.HiveTable) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.HiveTable other) { + if (other == com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getDescription().isEmpty()) { + description_ = other.description_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasStorageDescriptor()) { + mergeStorageDescriptor(other.getStorageDescriptor()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (partitionKeysBuilder_ == null) { + if (!other.partitionKeys_.isEmpty()) { + if (partitionKeys_.isEmpty()) { + partitionKeys_ = other.partitionKeys_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensurePartitionKeysIsMutable(); + partitionKeys_.addAll(other.partitionKeys_); + } + onChanged(); + } + } else { + if (!other.partitionKeys_.isEmpty()) { + if (partitionKeysBuilder_.isEmpty()) { + partitionKeysBuilder_.dispose(); + partitionKeysBuilder_ = null; + partitionKeys_ = other.partitionKeys_; + bitField0_ = (bitField0_ & ~0x00000010); + partitionKeysBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetPartitionKeysFieldBuilder() + : null; + } else { + partitionKeysBuilder_.addAllMessages(other.partitionKeys_); + } + } + } + internalGetMutableParameters().mergeFrom(other.internalGetParameters()); + bitField0_ |= 0x00000020; + if (!other.getTableType().isEmpty()) { + tableType_ = other.tableType_; + bitField0_ |= 0x00000040; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + description_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetStorageDescriptorFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 58: + { + com.google.cloud.biglake.hive.v1beta.FieldSchema m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.FieldSchema.parser(), + extensionRegistry); + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.add(m); + } else { + partitionKeysBuilder_.addMessage(m); + } + break; + } // case 58 + case 66: + { + com.google.protobuf.MapEntry parameters__ = + input.readMessage( + ParametersDefaultEntryHolder.defaultEntry.getParserForType(), + 
extensionRegistry); + internalGetMutableParameters() + .getMutableMap() + .put(parameters__.getKey(), parameters__.getValue()); + bitField0_ |= 0x00000020; + break; + } // case 66 + case 90: + { + tableType_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000040; + break; + } // case 90 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The resource name.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object description_ = ""; + + /** + * + * + *
+     * Optional. Description of the table. The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Description of the table. The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Description of the table. The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The description to set. + * @return This builder for chaining. + */ + public Builder setDescription(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + description_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Description of the table. The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDescription() { + description_ = getDefaultInstance().getDescription(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Description of the table. The maximum length is 4000 characters.
+     * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for description to set. + * @return This builder for chaining. + */ + public Builder setDescriptionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + description_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.cloud.biglake.hive.v1beta.StorageDescriptor storageDescriptor_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder> + storageDescriptorBuilder_; + + /** + * + * + *
+     * Required. Storage descriptor of the table.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the storageDescriptor field is set. + */ + public boolean hasStorageDescriptor() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Required. Storage descriptor of the table.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The storageDescriptor. + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor getStorageDescriptor() { + if (storageDescriptorBuilder_ == null) { + return storageDescriptor_ == null + ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } else { + return storageDescriptorBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. Storage descriptor of the table.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setStorageDescriptor( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor value) { + if (storageDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + storageDescriptor_ = value; + } else { + storageDescriptorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Storage descriptor of the table.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setStorageDescriptor( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder builderForValue) { + if (storageDescriptorBuilder_ == null) { + storageDescriptor_ = builderForValue.build(); + } else { + storageDescriptorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Storage descriptor of the table.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeStorageDescriptor( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor value) { + if (storageDescriptorBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && storageDescriptor_ != null + && storageDescriptor_ + != com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance()) { + getStorageDescriptorBuilder().mergeFrom(value); + } else { + storageDescriptor_ = value; + } + } else { + storageDescriptorBuilder_.mergeFrom(value); + } + if (storageDescriptor_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. Storage descriptor of the table.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearStorageDescriptor() { + bitField0_ = (bitField0_ & ~0x00000004); + storageDescriptor_ = null; + if (storageDescriptorBuilder_ != null) { + storageDescriptorBuilder_.dispose(); + storageDescriptorBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Storage descriptor of the table.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder + getStorageDescriptorBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetStorageDescriptorFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. Storage descriptor of the table.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder + getStorageDescriptorOrBuilder() { + if (storageDescriptorBuilder_ != null) { + return storageDescriptorBuilder_.getMessageOrBuilder(); + } else { + return storageDescriptor_ == null + ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } + } + + /** + * + * + *
+     * Required. Storage descriptor of the table.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder> + internalGetStorageDescriptorFieldBuilder() { + if (storageDescriptorBuilder_ == null) { + storageDescriptorBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder>( + getStorageDescriptor(), getParentForChildren(), isClean()); + storageDescriptor_ = null; + } + return storageDescriptorBuilder_; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. The creation time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Output only. The creation time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The creation time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000008); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The creation time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. The creation time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private java.util.List partitionKeys_ = + java.util.Collections.emptyList(); + + private void ensurePartitionKeysIsMutable() { + if (!((bitField0_ & 0x00000010) != 0)) { + partitionKeys_ = + new java.util.ArrayList( + partitionKeys_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.FieldSchema, + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder, + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder> + partitionKeysBuilder_; + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getPartitionKeysList() { + if (partitionKeysBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitionKeys_); + } else { + return partitionKeysBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getPartitionKeysCount() { + if (partitionKeysBuilder_ == null) { + return partitionKeys_.size(); + } else { + return partitionKeysBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema getPartitionKeys(int index) { + if (partitionKeysBuilder_ == null) { + return partitionKeys_.get(index); + } else { + return partitionKeysBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPartitionKeys( + int index, com.google.cloud.biglake.hive.v1beta.FieldSchema value) { + if (partitionKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionKeysIsMutable(); + partitionKeys_.set(index, value); + onChanged(); + } else { + partitionKeysBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPartitionKeys( + int index, com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder builderForValue) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionKeysBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addPartitionKeys(com.google.cloud.biglake.hive.v1beta.FieldSchema value) { + if (partitionKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionKeysIsMutable(); + partitionKeys_.add(value); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addPartitionKeys( + int index, com.google.cloud.biglake.hive.v1beta.FieldSchema value) { + if (partitionKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionKeysIsMutable(); + partitionKeys_.add(index, value); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addPartitionKeys( + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder builderForValue) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.add(builderForValue.build()); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addPartitionKeys( + int index, com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder builderForValue) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllPartitionKeys( + java.lang.Iterable values) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitionKeys_); + onChanged(); + } else { + partitionKeysBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearPartitionKeys() { + if (partitionKeysBuilder_ == null) { + partitionKeys_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + partitionKeysBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removePartitionKeys(int index) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.remove(index); + onChanged(); + } else { + partitionKeysBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder getPartitionKeysBuilder( + int index) { + return internalGetPartitionKeysFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder getPartitionKeysOrBuilder( + int index) { + if (partitionKeysBuilder_ == null) { + return partitionKeys_.get(index); + } else { + return partitionKeysBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getPartitionKeysOrBuilderList() { + if (partitionKeysBuilder_ != null) { + return partitionKeysBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitionKeys_); + } + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder addPartitionKeysBuilder() { + return internalGetPartitionKeysFieldBuilder() + .addBuilder(com.google.cloud.biglake.hive.v1beta.FieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder addPartitionKeysBuilder( + int index) { + return internalGetPartitionKeysFieldBuilder() + .addBuilder(index, com.google.cloud.biglake.hive.v1beta.FieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. The partition keys of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getPartitionKeysBuilderList() { + return internalGetPartitionKeysFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.FieldSchema, + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder, + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder> + internalGetPartitionKeysFieldBuilder() { + if (partitionKeysBuilder_ == null) { + partitionKeysBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.FieldSchema, + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder, + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder>( + partitionKeys_, + ((bitField0_ & 0x00000010) != 0), + getParentForChildren(), + isClean()); + partitionKeys_ = null; + } + return partitionKeysBuilder_; + } + + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField + internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + private com.google.protobuf.MapField + internalGetMutableParameters() { + if (parameters_ == null) { + parameters_ = + com.google.protobuf.MapField.newMapField(ParametersDefaultEntryHolder.defaultEntry); + } + if (!parameters_.isMutable()) { + parameters_ = parameters_.copy(); + } + bitField0_ |= 0x00000020; + onChanged(); + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the table. The maximum size
+     * is 4MiB.
+     * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the table. The maximum size
+     * is 4MiB.
+     * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the table. The maximum size
+     * is 4MiB.
+     * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the table. The maximum size
+     * is 4MiB.
+     * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearParameters() { + bitField0_ = (bitField0_ & ~0x00000020); + internalGetMutableParameters().getMutableMap().clear(); + return this; + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the table. The maximum size
+     * is 4MiB.
+     * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableParameters().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableParameters() { + bitField0_ |= 0x00000020; + return internalGetMutableParameters().getMutableMap(); + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the table. The maximum size
+     * is 4MiB.
+     * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putParameters(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParameters().getMutableMap().put(key, value); + bitField0_ |= 0x00000020; + return this; + } + + /** + * + * + *
+     * Optional. Stores the properties associated with the table. The maximum size
+     * is 4MiB.
+     * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllParameters(java.util.Map values) { + internalGetMutableParameters().getMutableMap().putAll(values); + bitField0_ |= 0x00000020; + return this; + } + + private java.lang.Object tableType_ = ""; + + /** + * + * + *
+     * Output only. The type of the table. This is EXTERNAL for BigLake hive
+     * tables.
+     * 
+ * + * string table_type = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The tableType. + */ + public java.lang.String getTableType() { + java.lang.Object ref = tableType_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + tableType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The type of the table. This is EXTERNAL for BigLake hive
+     * tables.
+     * 
+ * + * string table_type = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for tableType. + */ + public com.google.protobuf.ByteString getTableTypeBytes() { + java.lang.Object ref = tableType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + tableType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The type of the table. This is EXTERNAL for BigLake hive
+     * tables.
+     * 
+ * + * string table_type = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The tableType to set. + * @return This builder for chaining. + */ + public Builder setTableType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + tableType_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The type of the table. This is EXTERNAL for BigLake hive
+     * tables.
+     * 
+ * + * string table_type = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearTableType() { + tableType_ = getDefaultInstance().getTableType(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The type of the table. This is EXTERNAL for BigLake hive
+     * tables.
+     * 
+ * + * string table_type = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for tableType to set. + * @return This builder for chaining. + */ + public Builder setTableTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + tableType_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.HiveTable) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.HiveTable) + private static final com.google.cloud.biglake.hive.v1beta.HiveTable DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.HiveTable(); + } + + public static com.google.cloud.biglake.hive.v1beta.HiveTable getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public HiveTable parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + 
@java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveTable getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveTableOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveTableOrBuilder.java new file mode 100644 index 000000000000..aa63e832a079 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/HiveTableOrBuilder.java @@ -0,0 +1,338 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface HiveTableOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.HiveTable) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. The resource name.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Optional. Description of the table. The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + java.lang.String getDescription(); + + /** + * + * + *
+   * Optional. Description of the table. The maximum length is 4000 characters.
+   * 
+ * + * string description = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + com.google.protobuf.ByteString getDescriptionBytes(); + + /** + * + * + *
+   * Required. Storage descriptor of the table.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the storageDescriptor field is set. + */ + boolean hasStorageDescriptor(); + + /** + * + * + *
+   * Required. Storage descriptor of the table.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The storageDescriptor. + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptor getStorageDescriptor(); + + /** + * + * + *
+   * Required. Storage descriptor of the table.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder getStorageDescriptorOrBuilder(); + + /** + * + * + *
+   * Output only. The creation time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Optional. The partition keys of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getPartitionKeysList(); + + /** + * + * + *
+   * Optional. The partition keys of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.biglake.hive.v1beta.FieldSchema getPartitionKeys(int index); + + /** + * + * + *
+   * Optional. The partition keys of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getPartitionKeysCount(); + + /** + * + * + *
+   * Optional. The partition keys of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getPartitionKeysOrBuilderList(); + + /** + * + * + *
+   * Optional. The partition keys of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema partition_keys = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder getPartitionKeysOrBuilder(int index); + + /** + * + * + *
+   * Optional. Stores the properties associated with the table. The maximum size
+   * is 4MiB.
+   * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getParametersCount(); + + /** + * + * + *
+   * Optional. Stores the properties associated with the table. The maximum size
+   * is 4MiB.
+   * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsParameters(java.lang.String key); + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParameters(); + + /** + * + * + *
+   * Optional. Stores the properties associated with the table. The maximum size
+   * is 4MiB.
+   * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getParametersMap(); + + /** + * + * + *
+   * Optional. Stores the properties associated with the table. The maximum size
+   * is 4MiB.
+   * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
+   * Optional. Stores the properties associated with the table. The maximum size
+   * is 4MiB.
+   * 
+ * + * map<string, string> parameters = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getParametersOrThrow(java.lang.String key); + + /** + * + * + *
+   * Output only. The type of the table. This is EXTERNAL for BigLake hive
+   * tables.
+   * 
+ * + * string table_type = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The tableType. + */ + java.lang.String getTableType(); + + /** + * + * + *
+   * Output only. The type of the table. This is EXTERNAL for BigLake hive
+   * tables.
+   * 
+ * + * string table_type = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for tableType. + */ + com.google.protobuf.ByteString getTableTypeBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsRequest.java new file mode 100644 index 000000000000..6b57725a13df --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsRequest.java @@ -0,0 +1,908 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the ListHiveCatalogs method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest} + */ +@com.google.protobuf.Generated +public final class ListHiveCatalogsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest) + ListHiveCatalogsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListHiveCatalogsRequest"); + } + + // Use ListHiveCatalogsRequest.newBuilder() to construct. + private ListHiveCatalogsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListHiveCatalogsRequest() { + parent_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest.class, + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The project to list catalogs from.
+   * Format: projects/{project_id_or_number}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The project to list catalogs from.
+   * Format: projects/{project_id_or_number}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
+   * Optional. Page size for pagination.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
+   * Optional. Page token for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Page token for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final 
java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest other = + (com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the ListHiveCatalogs method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest) + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest.class, + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest build() { + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest result = + new com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest other) { + if (other + == com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() 
{ + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The project to list catalogs from.
+     * Format: projects/{project_id_or_number}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The project to list catalogs from.
+     * Format: projects/{project_id_or_number}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The project to list catalogs from.
+     * Format: projects/{project_id_or_number}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The project to list catalogs from.
+     * Format: projects/{project_id_or_number}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The project to list catalogs from.
+     * Format: projects/{project_id_or_number}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
+     * Optional. Page size for pagination.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
+     * Optional. Page size for pagination.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Page size for pagination.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
+     * Optional. Page token for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Page token for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Page token for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Page token for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Page token for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest) + private static final com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListHiveCatalogsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + 
public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsRequestOrBuilder.java new file mode 100644 index 000000000000..e15ef5cad196 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsRequestOrBuilder.java @@ -0,0 +1,99 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface ListHiveCatalogsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The project to list catalogs from.
+   * Format: projects/{project_id_or_number}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The project to list catalogs from.
+   * Format: projects/{project_id_or_number}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. Page size for pagination.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
+   * Optional. Page token for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
+   * Optional. Page token for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsResponse.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsResponse.java new file mode 100644 index 000000000000..ce3dced1dc1f --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsResponse.java @@ -0,0 +1,1476 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Response message for the ListHiveCatalogs method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse} + */ +@com.google.protobuf.Generated +public final class ListHiveCatalogsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse) + ListHiveCatalogsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListHiveCatalogsResponse"); + } + + // Use ListHiveCatalogsResponse.newBuilder() to construct. + private ListHiveCatalogsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListHiveCatalogsResponse() { + catalogs_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse.class, + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse.Builder.class); + } + + public static final int CATALOGS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List catalogs_; + + /** + * + * + *
+   * Output only. The catalogs from the specified project.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List getCatalogsList() { + return catalogs_; + } + + /** + * + * + *
+   * Output only. The catalogs from the specified project.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getCatalogsOrBuilderList() { + return catalogs_; + } + + /** + * + * + *
+   * Output only. The catalogs from the specified project.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getCatalogsCount() { + return catalogs_.size(); + } + + /** + * + * + *
+   * Output only. The catalogs from the specified project.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog getCatalogs(int index) { + return catalogs_.get(index); + } + + /** + * + * + *
+   * Output only. The catalogs from the specified project.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder getCatalogsOrBuilder(int index) { + return catalogs_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int UNREACHABLE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList unreachable_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+   * Output only. The list of unreachable cloud regions. If non-empty, the
+   * result set might be incomplete.
+   * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return A list containing the unreachable. + */ + public com.google.protobuf.ProtocolStringList getUnreachableList() { + return unreachable_; + } + + /** + * + * + *
+   * Output only. The list of unreachable cloud regions. If non-empty, the
+   * result set might be incomplete.
+   * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The count of unreachable. + */ + public int getUnreachableCount() { + return unreachable_.size(); + } + + /** + * + * + *
+   * Output only. The list of unreachable cloud regions. If non-empty, the
+   * result set might be incomplete.
+   * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + public java.lang.String getUnreachable(int index) { + return unreachable_.get(index); + } + + /** + * + * + *
+   * Output only. The list of unreachable cloud regions. If non-empty, the
+   * result set might be incomplete.
+   * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + public com.google.protobuf.ByteString getUnreachableBytes(int index) { + return unreachable_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < catalogs_.size(); i++) { + output.writeMessage(1, catalogs_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + for (int i = 0; i < unreachable_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, unreachable_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < catalogs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, catalogs_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + { + int dataSize = 0; + for (int i = 0; i < unreachable_.size(); i++) { + dataSize += computeStringSizeNoTag(unreachable_.getRaw(i)); + } + size += dataSize; + size += 1 * getUnreachableList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == 
this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse other = + (com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse) obj; + + if (!getCatalogsList().equals(other.getCatalogsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnreachableList().equals(other.getUnreachableList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getCatalogsCount() > 0) { + hash = (37 * hash) + CATALOGS_FIELD_NUMBER; + hash = (53 * hash) + getCatalogsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + if (getUnreachableCount() > 0) { + hash = (37 * hash) + UNREACHABLE_FIELD_NUMBER; + hash = (53 * hash) + getUnreachableList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for the ListHiveCatalogs method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse) + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse.class, + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (catalogsBuilder_ == null) { + catalogs_ = java.util.Collections.emptyList(); + } else { + catalogs_ = null; + catalogsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveCatalogsResponse_descriptor; + } + + @java.lang.Override + 
public com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse build() { + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse buildPartial() { + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse result = + new com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse result) { + if (catalogsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + catalogs_ = java.util.Collections.unmodifiableList(catalogs_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.catalogs_ = catalogs_; + } else { + result.catalogs_ = catalogsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + unreachable_.makeImmutable(); + result.unreachable_ = unreachable_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse other) { + if (other + == com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse.getDefaultInstance()) + return this; + if (catalogsBuilder_ == null) { + if (!other.catalogs_.isEmpty()) { + if (catalogs_.isEmpty()) { + catalogs_ = other.catalogs_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureCatalogsIsMutable(); + catalogs_.addAll(other.catalogs_); + } + onChanged(); + } + } else { + if (!other.catalogs_.isEmpty()) { + if (catalogsBuilder_.isEmpty()) { + catalogsBuilder_.dispose(); + catalogsBuilder_ = null; + catalogs_ = other.catalogs_; + bitField0_ = (bitField0_ & ~0x00000001); + catalogsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetCatalogsFieldBuilder() + : null; + } else { + catalogsBuilder_.addAllMessages(other.catalogs_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.unreachable_.isEmpty()) { + if (unreachable_.isEmpty()) { + unreachable_ = other.unreachable_; + bitField0_ |= 0x00000004; + } else { + ensureUnreachableIsMutable(); + unreachable_.addAll(other.unreachable_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.biglake.hive.v1beta.HiveCatalog m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.parser(), + 
extensionRegistry); + if (catalogsBuilder_ == null) { + ensureCatalogsIsMutable(); + catalogs_.add(m); + } else { + catalogsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureUnreachableIsMutable(); + unreachable_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List catalogs_ = + java.util.Collections.emptyList(); + + private void ensureCatalogsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + catalogs_ = + new java.util.ArrayList(catalogs_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder> + catalogsBuilder_; + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List getCatalogsList() { + if (catalogsBuilder_ == null) { + return java.util.Collections.unmodifiableList(catalogs_); + } else { + return catalogsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getCatalogsCount() { + if (catalogsBuilder_ == null) { + return catalogs_.size(); + } else { + return catalogsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog getCatalogs(int index) { + if (catalogsBuilder_ == null) { + return catalogs_.get(index); + } else { + return catalogsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCatalogs(int index, com.google.cloud.biglake.hive.v1beta.HiveCatalog value) { + if (catalogsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCatalogsIsMutable(); + catalogs_.set(index, value); + onChanged(); + } else { + catalogsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCatalogs( + int index, com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder builderForValue) { + if (catalogsBuilder_ == null) { + ensureCatalogsIsMutable(); + catalogs_.set(index, builderForValue.build()); + onChanged(); + } else { + catalogsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addCatalogs(com.google.cloud.biglake.hive.v1beta.HiveCatalog value) { + if (catalogsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCatalogsIsMutable(); + catalogs_.add(value); + onChanged(); + } else { + catalogsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addCatalogs(int index, com.google.cloud.biglake.hive.v1beta.HiveCatalog value) { + if (catalogsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCatalogsIsMutable(); + catalogs_.add(index, value); + onChanged(); + } else { + catalogsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addCatalogs( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder builderForValue) { + if (catalogsBuilder_ == null) { + ensureCatalogsIsMutable(); + catalogs_.add(builderForValue.build()); + onChanged(); + } else { + catalogsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addCatalogs( + int index, com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder builderForValue) { + if (catalogsBuilder_ == null) { + ensureCatalogsIsMutable(); + catalogs_.add(index, builderForValue.build()); + onChanged(); + } else { + catalogsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllCatalogs( + java.lang.Iterable values) { + if (catalogsBuilder_ == null) { + ensureCatalogsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, catalogs_); + onChanged(); + } else { + catalogsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCatalogs() { + if (catalogsBuilder_ == null) { + catalogs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + catalogsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeCatalogs(int index) { + if (catalogsBuilder_ == null) { + ensureCatalogsIsMutable(); + catalogs_.remove(index); + onChanged(); + } else { + catalogsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder getCatalogsBuilder(int index) { + return internalGetCatalogsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder getCatalogsOrBuilder( + int index) { + if (catalogsBuilder_ == null) { + return catalogs_.get(index); + } else { + return catalogsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getCatalogsOrBuilderList() { + if (catalogsBuilder_ != null) { + return catalogsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(catalogs_); + } + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder addCatalogsBuilder() { + return internalGetCatalogsFieldBuilder() + .addBuilder(com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder addCatalogsBuilder(int index) { + return internalGetCatalogsFieldBuilder() + .addBuilder(index, com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. The catalogs from the specified project.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getCatalogsBuilderList() { + return internalGetCatalogsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder> + internalGetCatalogsFieldBuilder() { + if (catalogsBuilder_ == null) { + catalogsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder>( + catalogs_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + catalogs_ = null; + } + return catalogsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList unreachable_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureUnreachableIsMutable() { + if (!unreachable_.isModifiable()) { + unreachable_ = new com.google.protobuf.LazyStringArrayList(unreachable_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
+     * Output only. The list of unreachable cloud regions. If non-empty, the
+     * result set might be incomplete.
+     * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return A list containing the unreachable. + */ + public com.google.protobuf.ProtocolStringList getUnreachableList() { + unreachable_.makeImmutable(); + return unreachable_; + } + + /** + * + * + *
+     * Output only. The list of unreachable cloud regions. If non-empty, the
+     * result set might be incomplete.
+     * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The count of unreachable. + */ + public int getUnreachableCount() { + return unreachable_.size(); + } + + /** + * + * + *
+     * Output only. The list of unreachable cloud regions. If non-empty, the
+     * result set might be incomplete.
+     * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + public java.lang.String getUnreachable(int index) { + return unreachable_.get(index); + } + + /** + * + * + *
+     * Output only. The list of unreachable cloud regions. If non-empty, the
+     * result set might be incomplete.
+     * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + public com.google.protobuf.ByteString getUnreachableBytes(int index) { + return unreachable_.getByteString(index); + } + + /** + * + * + *
+     * Output only. The list of unreachable cloud regions. If non-empty, the
+     * result set might be incomplete.
+     * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param index The index to set the value at. + * @param value The unreachable to set. + * @return This builder for chaining. + */ + public Builder setUnreachable(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableIsMutable(); + unreachable_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The list of unreachable cloud regions. If non-empty, the
+     * result set might be incomplete.
+     * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The unreachable to add. + * @return This builder for chaining. + */ + public Builder addUnreachable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableIsMutable(); + unreachable_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The list of unreachable cloud regions. If non-empty, the
+     * result set might be incomplete.
+     * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param values The unreachable to add. + * @return This builder for chaining. + */ + public Builder addAllUnreachable(java.lang.Iterable values) { + ensureUnreachableIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, unreachable_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The list of unreachable cloud regions. If non-empty, the
+     * result set might be incomplete.
+     * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearUnreachable() { + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The list of unreachable cloud regions. If non-empty, the
+     * result set might be incomplete.
+     * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes of the unreachable to add. + * @return This builder for chaining. + */ + public Builder addUnreachableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureUnreachableIsMutable(); + unreachable_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse) + private static final com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse(); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListHiveCatalogsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static 
com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsResponseOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsResponseOrBuilder.java new file mode 100644 index 000000000000..6c17252b07f0 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveCatalogsResponseOrBuilder.java @@ -0,0 +1,180 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface ListHiveCatalogsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. The catalogs from the specified project.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List getCatalogsList(); + + /** + * + * + *
+   * Output only. The catalogs from the specified project.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveCatalog getCatalogs(int index); + + /** + * + * + *
+   * Output only. The catalogs from the specified project.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getCatalogsCount(); + + /** + * + * + *
+   * Output only. The catalogs from the specified project.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getCatalogsOrBuilderList(); + + /** + * + * + *
+   * Output only. The catalogs from the specified project.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveCatalog catalogs = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder getCatalogsOrBuilder(int index); + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); + + /** + * + * + *
+   * Output only. The list of unreachable cloud regions. If non-empty, the
+   * result set might be incomplete.
+   * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return A list containing the unreachable. + */ + java.util.List getUnreachableList(); + + /** + * + * + *
+   * Output only. The list of unreachable cloud regions. If non-empty, the
+   * result set might be incomplete.
+   * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The count of unreachable. + */ + int getUnreachableCount(); + + /** + * + * + *
+   * Output only. The list of unreachable cloud regions. If non-empty, the
+   * result set might be incomplete.
+   * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + java.lang.String getUnreachable(int index); + + /** + * + * + *
+   * Output only. The list of unreachable cloud regions. If non-empty, the
+   * result set might be incomplete.
+   * 
+ * + * repeated string unreachable = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + com.google.protobuf.ByteString getUnreachableBytes(int index); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesRequest.java new file mode 100644 index 000000000000..5a91f29fb998 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesRequest.java @@ -0,0 +1,908 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the ListHiveDatabases method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest} + */ +@com.google.protobuf.Generated +public final class ListHiveDatabasesRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest) + ListHiveDatabasesRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListHiveDatabasesRequest"); + } + + // Use ListHiveDatabasesRequest.newBuilder() to construct. + private ListHiveDatabasesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListHiveDatabasesRequest() { + parent_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest.class, + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The Hive catalog to list databases from.
+   * Format: projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The Hive catalog to list databases from.
+   * Format: projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
+   * Optional. Page size for pagination.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
+   * Optional. Page token for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Page token for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final 
java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest other = + (com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the ListHiveDatabases method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest) + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest.class, + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest build() { + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest result = + new com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest other) { + if (other + == com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean 
isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The Hive catalog to list databases from.
+     * Format: projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The Hive catalog to list databases from.
+     * Format: projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The Hive catalog to list databases from.
+     * Format: projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Hive catalog to list databases from.
+     * Format: projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Hive catalog to list databases from.
+     * Format: projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
+     * Optional. Page size for pagination.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
+     * Optional. Page size for pagination.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Page size for pagination.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
+     * Optional. Page token for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Page token for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Page token for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Page token for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Page token for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest) + private static final com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListHiveDatabasesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override 
+ public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesRequestOrBuilder.java new file mode 100644 index 000000000000..696b7de592c1 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesRequestOrBuilder.java @@ -0,0 +1,99 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface ListHiveDatabasesRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The hive catalog to list databases from.
+   * Format: projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The hive catalog to list databases from.
+   * Format: projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. Page size for pagination.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
+   * Optional. PageToken for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
+   * Optional. PageToken for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesResponse.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesResponse.java new file mode 100644 index 000000000000..2ca04306aa73 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesResponse.java @@ -0,0 +1,1181 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Response message for the ListHiveDatabases method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse} + */ +@com.google.protobuf.Generated +public final class ListHiveDatabasesResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse) + ListHiveDatabasesResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListHiveDatabasesResponse"); + } + + // Use ListHiveDatabasesResponse.newBuilder() to construct. + private ListHiveDatabasesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListHiveDatabasesResponse() { + databases_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse.class, + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse.Builder.class); + } + + public static final int DATABASES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List databases_; + + /** + * + * + *
+   * Output only. The databases from the specified project and catalog.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List getDatabasesList() { + return databases_; + } + + /** + * + * + *
+   * Output only. The databases from the specified project and catalog.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getDatabasesOrBuilderList() { + return databases_; + } + + /** + * + * + *
+   * Output only. The databases from the specified project and catalog.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getDatabasesCount() { + return databases_.size(); + } + + /** + * + * + *
+   * Output only. The databases from the specified project and catalog.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveDatabase getDatabases(int index) { + return databases_.get(index); + } + + /** + * + * + *
+   * Output only. The databases from the specified project and catalog.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder getDatabasesOrBuilder( + int index) { + return databases_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < databases_.size(); i++) { + output.writeMessage(1, databases_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < databases_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, databases_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse)) { + return super.equals(obj); + } + 
com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse other = + (com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse) obj; + + if (!getDatabasesList().equals(other.getDatabasesList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getDatabasesCount() > 0) { + hash = (37 * hash) + DATABASES_FIELD_NUMBER; + hash = (53 * hash) + getDatabasesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for the ListHiveDatabases method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse) + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse.class, + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (databasesBuilder_ == null) { + databases_ = java.util.Collections.emptyList(); + } else { + databases_ = null; + databasesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveDatabasesResponse_descriptor; + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse build() { + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse buildPartial() { + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse result = + new com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse result) { + if (databasesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + databases_ = java.util.Collections.unmodifiableList(databases_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.databases_ = databases_; + } else { + result.databases_ = databasesBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse other) { + if (other + == 
com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse.getDefaultInstance()) + return this; + if (databasesBuilder_ == null) { + if (!other.databases_.isEmpty()) { + if (databases_.isEmpty()) { + databases_ = other.databases_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureDatabasesIsMutable(); + databases_.addAll(other.databases_); + } + onChanged(); + } + } else { + if (!other.databases_.isEmpty()) { + if (databasesBuilder_.isEmpty()) { + databasesBuilder_.dispose(); + databasesBuilder_ = null; + databases_ = other.databases_; + bitField0_ = (bitField0_ & ~0x00000001); + databasesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetDatabasesFieldBuilder() + : null; + } else { + databasesBuilder_.addAllMessages(other.databases_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.biglake.hive.v1beta.HiveDatabase m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.HiveDatabase.parser(), + extensionRegistry); + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + databases_.add(m); + } else { + databasesBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List databases_ = + java.util.Collections.emptyList(); + + private void ensureDatabasesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + databases_ = + new java.util.ArrayList(databases_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveDatabase, + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder, + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder> + databasesBuilder_; + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List getDatabasesList() { + if (databasesBuilder_ == null) { + return java.util.Collections.unmodifiableList(databases_); + } else { + return databasesBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getDatabasesCount() { + if (databasesBuilder_ == null) { + return databases_.size(); + } else { + return databasesBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveDatabase getDatabases(int index) { + if (databasesBuilder_ == null) { + return databases_.get(index); + } else { + return databasesBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setDatabases( + int index, com.google.cloud.biglake.hive.v1beta.HiveDatabase value) { + if (databasesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatabasesIsMutable(); + databases_.set(index, value); + onChanged(); + } else { + databasesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setDatabases( + int index, com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder builderForValue) { + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + databases_.set(index, builderForValue.build()); + onChanged(); + } else { + databasesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addDatabases(com.google.cloud.biglake.hive.v1beta.HiveDatabase value) { + if (databasesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatabasesIsMutable(); + databases_.add(value); + onChanged(); + } else { + databasesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addDatabases( + int index, com.google.cloud.biglake.hive.v1beta.HiveDatabase value) { + if (databasesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatabasesIsMutable(); + databases_.add(index, value); + onChanged(); + } else { + databasesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addDatabases( + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder builderForValue) { + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + databases_.add(builderForValue.build()); + onChanged(); + } else { + databasesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addDatabases( + int index, com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder builderForValue) { + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + databases_.add(index, builderForValue.build()); + onChanged(); + } else { + databasesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllDatabases( + java.lang.Iterable values) { + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, databases_); + onChanged(); + } else { + databasesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearDatabases() { + if (databasesBuilder_ == null) { + databases_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + databasesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeDatabases(int index) { + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + databases_.remove(index); + onChanged(); + } else { + databasesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder getDatabasesBuilder( + int index) { + return internalGetDatabasesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder getDatabasesOrBuilder( + int index) { + if (databasesBuilder_ == null) { + return databases_.get(index); + } else { + return databasesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getDatabasesOrBuilderList() { + if (databasesBuilder_ != null) { + return databasesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(databases_); + } + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder addDatabasesBuilder() { + return internalGetDatabasesFieldBuilder() + .addBuilder(com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder addDatabasesBuilder( + int index) { + return internalGetDatabasesFieldBuilder() + .addBuilder( + index, com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. The databases from the specified project and catalog.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getDatabasesBuilderList() { + return internalGetDatabasesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveDatabase, + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder, + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder> + internalGetDatabasesFieldBuilder() { + if (databasesBuilder_ == null) { + databasesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveDatabase, + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder, + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder>( + databases_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + databases_ = null; + } + return databasesBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse) + private static final com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse(); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListHiveDatabasesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; 
+ } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesResponseOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesResponseOrBuilder.java new file mode 100644 index 000000000000..10a86e771fc2 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveDatabasesResponseOrBuilder.java @@ -0,0 +1,122 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface ListHiveDatabasesResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. The databases from the specified project and catalog.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List getDatabasesList(); + + /** + * + * + *
+   * Output only. The databases from the specified project and catalog.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveDatabase getDatabases(int index); + + /** + * + * + *
+   * Output only. The databases from the specified project and catalog.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getDatabasesCount(); + + /** + * + * + *
+   * Output only. The databases from the specified project and catalog.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getDatabasesOrBuilderList(); + + /** + * + * + *
+   * Output only. The databases from the specified project and catalog.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveDatabase databases = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder getDatabasesOrBuilder(int index); + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesRequest.java new file mode 100644 index 000000000000..0d72142d5bd1 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesRequest.java @@ -0,0 +1,911 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the ListHiveTables method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveTablesRequest} + */ +@com.google.protobuf.Generated +public final class ListHiveTablesRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.ListHiveTablesRequest) + ListHiveTablesRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListHiveTablesRequest"); + } + + // Use ListHiveTablesRequest.newBuilder() to construct. + private ListHiveTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListHiveTablesRequest() { + parent_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest.class, + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The database to list tables from.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The database to list tables from.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
+   * Optional. Page size for pagination.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
+   * Optional. PageToken for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. PageToken for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final 
java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest other = + (com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { 
+ return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the ListHiveTables method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveTablesRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.ListHiveTablesRequest) + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest.class, + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest build() { + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest result = + new com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest other) { + if (other == com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + 
@java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The database to list tables from.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The database to list tables from.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The database to list tables from.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The database to list tables from.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The database to list tables from.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
+     * Optional. Page size for pagination.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
+     * Optional. Page size for pagination.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Page size for pagination.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
+     * Optional. PageToken for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. PageToken for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. PageToken for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. PageToken for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. PageToken for pagination.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.ListHiveTablesRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.ListHiveTablesRequest) + private static final com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListHiveTablesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesRequestOrBuilder.java new file mode 100644 index 000000000000..e28c70df810f --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesRequestOrBuilder.java @@ -0,0 +1,101 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface ListHiveTablesRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.ListHiveTablesRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The database to list tables from.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The database to list tables from.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. Page size for pagination.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
+   * Optional. PageToken for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
+   * Optional. PageToken for pagination.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesResponse.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesResponse.java new file mode 100644 index 000000000000..90747a4a96b4 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesResponse.java @@ -0,0 +1,1166 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Response message for the ListHiveTables method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveTablesResponse} + */ +@com.google.protobuf.Generated +public final class ListHiveTablesResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.ListHiveTablesResponse) + ListHiveTablesResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListHiveTablesResponse"); + } + + // Use ListHiveTablesResponse.newBuilder() to construct. + private ListHiveTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListHiveTablesResponse() { + tables_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse.class, + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse.Builder.class); + } + + public static final int TABLES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List tables_; + + /** + * + * + *
+   * Output only. The tables from the specified project, catalog and database.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List getTablesList() { + return tables_; + } + + /** + * + * + *
+   * Output only. The tables from the specified project, catalog and database.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getTablesOrBuilderList() { + return tables_; + } + + /** + * + * + *
+   * Output only. The tables from the specified project, catalog and database.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getTablesCount() { + return tables_.size(); + } + + /** + * + * + *
+   * Output only. The tables from the specified project, catalog and database.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveTable getTables(int index) { + return tables_.get(index); + } + + /** + * + * + *
+   * Output only. The tables from the specified project, catalog and database.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder getTablesOrBuilder(int index) { + return tables_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < tables_.size(); i++) { + output.writeMessage(1, tables_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < tables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, tables_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse)) { + return super.equals(obj); + } + 
com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse other = + (com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse) obj; + + if (!getTablesList().equals(other.getTablesList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getTablesCount() > 0) { + hash = (37 * hash) + TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTablesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for the ListHiveTables method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListHiveTablesResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.ListHiveTablesResponse) + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse.class, + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + } else { + tables_ = null; + tablesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListHiveTablesResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse getDefaultInstanceForType() { + 
return com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse build() { + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse buildPartial() { + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse result = + new com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse result) { + if (tablesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.tables_ = tables_; + } else { + result.tables_ = tablesBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse other) { + if (other == com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse.getDefaultInstance()) + return this; + if (tablesBuilder_ == null) { + if (!other.tables_.isEmpty()) { + if 
(tables_.isEmpty()) { + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTablesIsMutable(); + tables_.addAll(other.tables_); + } + onChanged(); + } + } else { + if (!other.tables_.isEmpty()) { + if (tablesBuilder_.isEmpty()) { + tablesBuilder_.dispose(); + tablesBuilder_ = null; + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000001); + tablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetTablesFieldBuilder() + : null; + } else { + tablesBuilder_.addAllMessages(other.tables_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.biglake.hive.v1beta.HiveTable m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.HiveTable.parser(), extensionRegistry); + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(m); + } else { + tablesBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List tables_ = + java.util.Collections.emptyList(); + + private void ensureTablesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + tables_ = new java.util.ArrayList(tables_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveTable, + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder, + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder> + tablesBuilder_; + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List getTablesList() { + if (tablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tables_); + } else { + return tablesBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getTablesCount() { + if (tablesBuilder_ == null) { + return tables_.size(); + } else { + return tablesBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveTable getTables(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTables(int index, com.google.cloud.biglake.hive.v1beta.HiveTable value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.set(index, value); + onChanged(); + } else { + tablesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTables( + int index, com.google.cloud.biglake.hive.v1beta.HiveTable.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.set(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addTables(com.google.cloud.biglake.hive.v1beta.HiveTable value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(value); + onChanged(); + } else { + tablesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addTables(int index, com.google.cloud.biglake.hive.v1beta.HiveTable value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(index, value); + onChanged(); + } else { + tablesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addTables( + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addTables( + int index, com.google.cloud.biglake.hive.v1beta.HiveTable.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllTables( + java.lang.Iterable values) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, tables_); + onChanged(); + } else { + tablesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearTables() { + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tablesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeTables(int index) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.remove(index); + onChanged(); + } else { + tablesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveTable.Builder getTablesBuilder(int index) { + return internalGetTablesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder getTablesOrBuilder(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getTablesOrBuilderList() { + if (tablesBuilder_ != null) { + return tablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tables_); + } + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveTable.Builder addTablesBuilder() { + return internalGetTablesFieldBuilder() + .addBuilder(com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveTable.Builder addTablesBuilder(int index) { + return internalGetTablesFieldBuilder() + .addBuilder(index, com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. The tables from the specified project, catalog and database.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getTablesBuilderList() { + return internalGetTablesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveTable, + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder, + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder> + internalGetTablesFieldBuilder() { + if (tablesBuilder_ == null) { + tablesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveTable, + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder, + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder>( + tables_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + tables_ = null; + } + return tablesBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. A token, which can be sent as `page_token` to retrieve the
+     * next page. If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.ListHiveTablesResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.ListHiveTablesResponse) + private static final com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse(); + } + + public static com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListHiveTablesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesResponseOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesResponseOrBuilder.java new file mode 100644 index 000000000000..961c7d23d784 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListHiveTablesResponseOrBuilder.java @@ -0,0 +1,122 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface ListHiveTablesResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.ListHiveTablesResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. The tables from the specified project, catalog and database.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List getTablesList(); + + /** + * + * + *
+   * Output only. The tables from the specified project, catalog and database.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveTable getTables(int index); + + /** + * + * + *
+   * Output only. The tables from the specified project, catalog and database.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getTablesCount(); + + /** + * + * + *
+   * Output only. The tables from the specified project, catalog and database.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getTablesOrBuilderList(); + + /** + * + * + *
+   * Output only. The tables from the specified project, catalog and database.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.HiveTable tables = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder getTablesOrBuilder(int index); + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
+   * Output only. A token, which can be sent as `page_token` to retrieve the
+   * next page. If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListPartitionsRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListPartitionsRequest.java new file mode 100644 index 000000000000..39e9c64e794f --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListPartitionsRequest.java @@ -0,0 +1,891 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for ListPartitions.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListPartitionsRequest} + */ +@com.google.protobuf.Generated +public final class ListPartitionsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.ListPartitionsRequest) + ListPartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListPartitionsRequest"); + } + + // Use ListPartitionsRequest.newBuilder() to construct. + private ListPartitionsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListPartitionsRequest() { + parent_ = ""; + filter_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest.class, + com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a `WHERE` clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples:
+   * * `"int_field > 5"`
+   * * `"date_field = CAST('2014-9-27' as DATE)"`
+   * * `"nullable_field is not NULL"`
+   * * `"st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"`
+   * * `"numeric_field BETWEEN 1.0 AND 5.0"`
+   *
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a `WHERE` clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples:
+   * * `"int_field > 5"`
+   * * `"date_field = CAST('2014-9-27' as DATE)"`
+   * * `"nullable_field is not NULL"`
+   * * `"st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"`
+   * * `"numeric_field BETWEEN 1.0 AND 5.0"`
+   *
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, filter_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, filter_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest)) { + return super.equals(obj); + } + 
com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest other = + (com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for ListPartitions.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListPartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.ListPartitionsRequest) + com.google.cloud.biglake.hive.v1beta.ListPartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest.class, + com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + filter_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest 
build() { + com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest result = + new com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.filter_ = filter_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest other) { + if (other == com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw 
new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these partitions belong, in the
+     * format of
+     * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a `WHERE` clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples:
+     * * `"int_field > 5"`
+     * * `"date_field = CAST('2014-9-27' as DATE)"`
+     * * `"nullable_field is not NULL"`
+     * * `"st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"`
+     * * `"numeric_field BETWEEN 1.0 AND 5.0"`
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a `WHERE` clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples:
+     * * `"int_field > 5"`
+     * * `"date_field = CAST('2014-9-27' as DATE)"`
+     * * `"nullable_field is not NULL"`
+     * * `"st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"`
+     * * `"numeric_field BETWEEN 1.0 AND 5.0"`
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a `WHERE` clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples:
+     * * `"int_field > 5"`
+     * * `"date_field = CAST('2014-9-27' as DATE)"`
+     * * `"nullable_field is not NULL"`
+     * * `"st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"`
+     * * `"numeric_field BETWEEN 1.0 AND 5.0"`
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a `WHERE` clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples:
+     * * `"int_field > 5"`
+     * * `"date_field = CAST('2014-9-27' as DATE)"`
+     * * `"nullable_field is not NULL"`
+     * * `"st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"`
+     * * `"numeric_field BETWEEN 1.0 AND 5.0"`
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a `WHERE` clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples:
+     * * `"int_field > 5"`
+     * * `"date_field = CAST('2014-9-27' as DATE)"`
+     * * `"nullable_field is not NULL"`
+     * * `"st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"`
+     * * `"numeric_field BETWEEN 1.0 AND 5.0"`
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.ListPartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.ListPartitionsRequest) + private static final com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListPartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListPartitionsRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListPartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..959b3618b14a --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListPartitionsRequestOrBuilder.java @@ -0,0 +1,110 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface ListPartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.ListPartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which these partitions belong, in the
+   * format of
+   * projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a `WHERE` clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples:
+   * * `"int_field > 5"`
+   * * `"date_field = CAST('2014-9-27' as DATE)"`
+   * * `"nullable_field is not NULL"`
+   * * `"st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"`
+   * * `"numeric_field BETWEEN 1.0 AND 5.0"`
+   *
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a `WHERE` clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples:
+   * * `"int_field > 5"`
+   * * `"date_field = CAST('2014-9-27' as DATE)"`
+   * * `"nullable_field is not NULL"`
+   * * `"st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"`
+   * * `"numeric_field BETWEEN 1.0 AND 5.0"`
+   *
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListPartitionsResponse.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListPartitionsResponse.java new file mode 100644 index 000000000000..8e6a5fb98b07 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ListPartitionsResponse.java @@ -0,0 +1,972 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Response message for ListPartitions.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListPartitionsResponse} + */ +@com.google.protobuf.Generated +public final class ListPartitionsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.ListPartitionsResponse) + ListPartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListPartitionsResponse"); + } + + // Use ListPartitionsResponse.newBuilder() to construct. + private ListPartitionsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListPartitionsResponse() { + partitions_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse.class, + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse.Builder.class); + } + + public static final int PARTITIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List partitions_; + + /** + * + * + *
+   * Output only. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List getPartitionsList() { + return partitions_; + } + + /** + * + * + *
+   * Output only. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getPartitionsOrBuilderList() { + return partitions_; + } + + /** + * + * + *
+   * Output only. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getPartitionsCount() { + return partitions_.size(); + } + + /** + * + * + *
+   * Output only. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.Partition getPartitions(int index) { + return partitions_.get(index); + } + + /** + * + * + *
+   * Output only. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionsOrBuilder(int index) { + return partitions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < partitions_.size(); i++) { + output.writeMessage(1, partitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < partitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, partitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse other = + (com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse) obj; + + if (!getPartitionsList().equals(other.getPartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPartitionsCount() > 0) { + hash = (37 * hash) + 
PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for ListPartitions.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.ListPartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.ListPartitionsResponse) + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse.class, + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + } else { + partitions_ = null; + partitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_ListPartitionsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse getDefaultInstanceForType() { + return 
com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse build() { + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse buildPartial() { + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse result = + new com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse result) { + if (partitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + partitions_ = java.util.Collections.unmodifiableList(partitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.partitions_ = partitions_; + } else { + result.partitions_ = partitionsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse other) { + if (other == com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse.getDefaultInstance()) + return this; + if (partitionsBuilder_ == null) { + if (!other.partitions_.isEmpty()) { + if (partitions_.isEmpty()) { + partitions_ = other.partitions_; + 
bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePartitionsIsMutable(); + partitions_.addAll(other.partitions_); + } + onChanged(); + } + } else { + if (!other.partitions_.isEmpty()) { + if (partitionsBuilder_.isEmpty()) { + partitionsBuilder_.dispose(); + partitionsBuilder_ = null; + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + partitionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetPartitionsFieldBuilder() + : null; + } else { + partitionsBuilder_.addAllMessages(other.partitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.biglake.hive.v1beta.Partition m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.Partition.parser(), extensionRegistry); + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(m); + } else { + partitionsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List partitions_ = + java.util.Collections.emptyList(); + + private void ensurePartitionsIsMutable() { 
+ if (!((bitField0_ & 0x00000001) != 0)) { + partitions_ = + new java.util.ArrayList(partitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder> + partitionsBuilder_; + + /** + * + * + *
+     * Output only. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List getPartitionsList() { + if (partitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitions_); + } else { + return partitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Output only. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getPartitionsCount() { + if (partitionsBuilder_ == null) { + return partitions_.size(); + } else { + return partitionsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Output only. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.biglake.hive.v1beta.Partition getPartitions(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Output only. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setPartitions(int index, com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.set(index, value); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setPartitions( + int index, com.google.cloud.biglake.hive.v1beta.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addPartitions(com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(value); + onChanged(); + } else { + partitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Output only. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addPartitions(int index, com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(index, value); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addPartitions( + com.google.cloud.biglake.hive.v1beta.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addPartitions( + int index, com.google.cloud.biglake.hive.v1beta.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllPartitions( + java.lang.Iterable values) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitions_); + onChanged(); + } else { + partitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Output only. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    /**
     * Removes the element at {@code index} from
     * {@code repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1}.
     */
    public Builder removePartitions(int index) {
      if (partitionsBuilder_ == null) {
        // No RepeatedFieldBuilder attached yet: mutate the plain backing list.
        ensurePartitionsIsMutable();
        partitions_.remove(index);
        onChanged();
      } else {
        partitionsBuilder_.remove(index);
      }
      return this;
    }

    /**
     * Returns a mutable builder for the partition at {@code index}; edits made through it
     * are reflected in the enclosing message when it is built.
     */
    public com.google.cloud.biglake.hive.v1beta.Partition.Builder getPartitionsBuilder(int index) {
      return internalGetPartitionsFieldBuilder().getBuilder(index);
    }

    /**
     * Returns a read-only view (message or in-progress builder) of the partition at
     * {@code index}, without forcing a copy.
     */
    public com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionsOrBuilder(
        int index) {
      if (partitionsBuilder_ == null) {
        return partitions_.get(index);
      } else {
        return partitionsBuilder_.getMessageOrBuilder(index);
      }
    }

    /**
     * Returns read-only views of all partitions; the list reflects whichever backing
     * store (plain list or RepeatedFieldBuilder) is currently active.
     */
    public java.util.List<? extends com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder>
        getPartitionsOrBuilderList() {
      if (partitionsBuilder_ != null) {
        return partitionsBuilder_.getMessageOrBuilderList();
      } else {
        return java.util.Collections.unmodifiableList(partitions_);
      }
    }
    /**
     * Appends a new default-initialized partition and returns its builder for in-place
     * population.
     */
    public com.google.cloud.biglake.hive.v1beta.Partition.Builder addPartitionsBuilder() {
      return internalGetPartitionsFieldBuilder()
          .addBuilder(com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance());
    }

    /**
     * Inserts a new default-initialized partition at {@code index} and returns its builder.
     */
    public com.google.cloud.biglake.hive.v1beta.Partition.Builder addPartitionsBuilder(int index) {
      return internalGetPartitionsFieldBuilder()
          .addBuilder(index, com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance());
    }

    /** Returns builders for every element of the partitions field. */
    public java.util.List<com.google.cloud.biglake.hive.v1beta.Partition.Builder>
        getPartitionsBuilderList() {
      return internalGetPartitionsFieldBuilder().getBuilderList();
    }

    /**
     * Lazily creates the RepeatedFieldBuilder for the partitions field. Once created it
     * owns the data; the plain {@code partitions_} list is nulled out so there is a single
     * source of truth.
     */
    private com.google.protobuf.RepeatedFieldBuilder<
            com.google.cloud.biglake.hive.v1beta.Partition,
            com.google.cloud.biglake.hive.v1beta.Partition.Builder,
            com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder>
        internalGetPartitionsFieldBuilder() {
      if (partitionsBuilder_ == null) {
        partitionsBuilder_ =
            new com.google.protobuf.RepeatedFieldBuilder<
                com.google.cloud.biglake.hive.v1beta.Partition,
                com.google.cloud.biglake.hive.v1beta.Partition.Builder,
                com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder>(
                partitions_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean());
        partitions_ = null;
      }
      return partitionsBuilder_;
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.ListPartitionsResponse)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.ListPartitionsResponse)
  private static final com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse();
  }

  /** Returns the shared immutable default (all-fields-unset) instance. */
  public static com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<ListPartitionsResponse> PARSER =
      new com.google.protobuf.AbstractParser<ListPartitionsResponse>() {
        @java.lang.Override
        public ListPartitionsResponse parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            // Attach whatever was parsed so far so callers can inspect partial data.
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<ListPartitionsResponse> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<ListPartitionsResponse> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
// NO CHECKED-IN PROTOBUF GENCODE
// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto
// Protobuf Java Version: 4.33.2

package com.google.cloud.biglake.hive.v1beta;

/**
 * Read-only accessor interface for {@code google.cloud.biglake.hive.v1beta.ListPartitionsResponse},
 * implemented by both the message and its builder. The single field is
 * {@code repeated .google.cloud.biglake.hive.v1beta.Partition partitions = 1}
 * (field behavior OUTPUT_ONLY).
 */
@com.google.protobuf.Generated
public interface ListPartitionsResponseOrBuilder
    extends
    // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.ListPartitionsResponse)
    com.google.protobuf.MessageOrBuilder {

  /** Output only. List of partitions. */
  java.util.List<com.google.cloud.biglake.hive.v1beta.Partition> getPartitionsList();

  /** Output only. The partition at {@code index}. */
  com.google.cloud.biglake.hive.v1beta.Partition getPartitions(int index);

  /** Output only. Number of partitions. */
  int getPartitionsCount();

  /** Output only. Read-only (message-or-builder) views of all partitions. */
  java.util.List<? extends com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder>
      getPartitionsOrBuilderList();

  /** Output only. Read-only (message-or-builder) view of the partition at {@code index}. */
  com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionsOrBuilder(int index);
}
/**
 * AUTO-GENERATED resource-name helper for the pattern
 * {@code projects/{project}/catalogs/{catalog}/databases/{database}}.
 *
 * <p>Instances are immutable; use {@link #of} / {@link #format} to build and
 * {@link #parse} to round-trip from the formatted string form.
 */
@Generated("by gapic-generator-java")
public class NamespaceName implements ResourceName {
  // Template for the single supported pattern; no URL-encoding of segment values.
  private static final PathTemplate PROJECT_CATALOG_DATABASE =
      PathTemplate.createWithoutUrlEncoding(
          "projects/{project}/catalogs/{catalog}/databases/{database}");
  // Lazily built, cached field map (volatile for safe double-checked locking below).
  private volatile Map<String, String> fieldValuesMap;
  private final String project;
  private final String catalog;
  private final String database;

  @Deprecated
  protected NamespaceName() {
    project = null;
    catalog = null;
    database = null;
  }

  private NamespaceName(Builder builder) {
    project = Preconditions.checkNotNull(builder.getProject());
    catalog = Preconditions.checkNotNull(builder.getCatalog());
    database = Preconditions.checkNotNull(builder.getDatabase());
  }

  public String getProject() {
    return project;
  }

  public String getCatalog() {
    return catalog;
  }

  public String getDatabase() {
    return database;
  }

  public static Builder newBuilder() {
    return new Builder();
  }

  public Builder toBuilder() {
    return new Builder(this);
  }

  /** Builds a NamespaceName from its three path segments (all non-null). */
  public static NamespaceName of(String project, String catalog, String database) {
    return newBuilder().setProject(project).setCatalog(catalog).setDatabase(database).build();
  }

  /** Formats the three segments directly into the resource-name string. */
  public static String format(String project, String catalog, String database) {
    return newBuilder()
        .setProject(project)
        .setCatalog(catalog)
        .setDatabase(database)
        .build()
        .toString();
  }

  /**
   * Parses a formatted resource name back into a NamespaceName.
   * Returns null for the empty string; throws if the string does not match the pattern.
   */
  public static NamespaceName parse(String formattedString) {
    if (formattedString.isEmpty()) {
      return null;
    }
    Map<String, String> matchMap =
        PROJECT_CATALOG_DATABASE.validatedMatch(
            formattedString, "NamespaceName.parse: formattedString not in valid format");
    return of(matchMap.get("project"), matchMap.get("catalog"), matchMap.get("database"));
  }

  public static List<NamespaceName> parseList(List<String> formattedStrings) {
    List<NamespaceName> list = new ArrayList<>(formattedStrings.size());
    for (String formattedString : formattedStrings) {
      list.add(parse(formattedString));
    }
    return list;
  }

  /** Null elements map to the empty string (mirrors generator convention). */
  public static List<String> toStringList(List<NamespaceName> values) {
    List<String> list = new ArrayList<>(values.size());
    for (NamespaceName value : values) {
      if (value == null) {
        list.add("");
      } else {
        list.add(value.toString());
      }
    }
    return list;
  }

  public static boolean isParsableFrom(String formattedString) {
    return PROJECT_CATALOG_DATABASE.matches(formattedString);
  }

  @Override
  public Map<String, String> getFieldValuesMap() {
    // Double-checked locking over the volatile cache field.
    if (fieldValuesMap == null) {
      synchronized (this) {
        if (fieldValuesMap == null) {
          ImmutableMap.Builder<String, String> fieldMapBuilder = ImmutableMap.builder();
          if (project != null) {
            fieldMapBuilder.put("project", project);
          }
          if (catalog != null) {
            fieldMapBuilder.put("catalog", catalog);
          }
          if (database != null) {
            fieldMapBuilder.put("database", database);
          }
          fieldValuesMap = fieldMapBuilder.build();
        }
      }
    }
    return fieldValuesMap;
  }

  public String getFieldValue(String fieldName) {
    return getFieldValuesMap().get(fieldName);
  }

  @Override
  public String toString() {
    return PROJECT_CATALOG_DATABASE.instantiate(
        "project", project, "catalog", catalog, "database", database);
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    if (o != null && getClass() == o.getClass()) {
      NamespaceName that = ((NamespaceName) o);
      return Objects.equals(this.project, that.project)
          && Objects.equals(this.catalog, that.catalog)
          && Objects.equals(this.database, that.database);
    }
    return false;
  }

  @Override
  public int hashCode() {
    int h = 1;
    h *= 1000003;
    h ^= Objects.hashCode(project);
    h *= 1000003;
    h ^= Objects.hashCode(catalog);
    h *= 1000003;
    h ^= Objects.hashCode(database);
    return h;
  }

  /** Builder for projects/{project}/catalogs/{catalog}/databases/{database}. */
  public static class Builder {
    private String project;
    private String catalog;
    private String database;

    protected Builder() {}

    public String getProject() {
      return project;
    }

    public String getCatalog() {
      return catalog;
    }

    public String getDatabase() {
      return database;
    }

    public Builder setProject(String project) {
      this.project = project;
      return this;
    }

    public Builder setCatalog(String catalog) {
      this.catalog = catalog;
      return this;
    }

    public Builder setDatabase(String database) {
      this.database = database;
      return this;
    }

    private Builder(NamespaceName namespaceName) {
      this.project = namespaceName.project;
      this.catalog = namespaceName.catalog;
      this.database = namespaceName.database;
    }

    public NamespaceName build() {
      return new NamespaceName(this);
    }
  }
}
// NO CHECKED-IN PROTOBUF GENCODE
// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto
// Protobuf Java Version: 4.33.2

package com.google.cloud.biglake.hive.v1beta;

/**
 * Information about a Hive partition.
 *
 * <p>Protobuf type {@code google.cloud.biglake.hive.v1beta.Partition}
 */
@com.google.protobuf.Generated
public final class Partition extends com.google.protobuf.GeneratedMessage
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.Partition)
    PartitionOrBuilder {
  private static final long serialVersionUID = 0L;

  static {
    // Fail fast if the protobuf runtime on the classpath is incompatible with
    // the gencode version (4.33.2) that produced this class.
    com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion(
        com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC,
        /* major= */ 4,
        /* minor= */ 33,
        /* patch= */ 2,
        /* suffix= */ "",
        "Partition");
  }

  // Use Partition.newBuilder() to construct.
  private Partition(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
    super(builder);
  }

  private Partition() {
    values_ = com.google.protobuf.LazyStringArrayList.emptyList();
    fields_ = java.util.Collections.emptyList();
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto
        .internal_static_google_cloud_biglake_hive_v1beta_Partition_descriptor;
  }

  @SuppressWarnings({"rawtypes"})
  @java.lang.Override
  protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection(
      int number) {
    switch (number) {
      case 4: // the only map field in this message: parameters
        return internalGetParameters();
      default:
        throw new RuntimeException("Invalid map field number: " + number);
    }
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto
        .internal_static_google_cloud_biglake_hive_v1beta_Partition_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.biglake.hive.v1beta.Partition.class,
            com.google.cloud.biglake.hive.v1beta.Partition.Builder.class);
  }

  // Presence bits for the singular message fields (create_time, storage_descriptor).
  private int bitField0_;
  public static final int VALUES_FIELD_NUMBER = 1;

  @SuppressWarnings("serial")
  private com.google.protobuf.LazyStringArrayList values_ =
      com.google.protobuf.LazyStringArrayList.emptyList();
  /**
   * Required. Represents the values of the partition keys, where each value
   * corresponds to a specific partition key in the order in which the keys are
   * defined. Each value is limited to 1024 characters.
   *
   * <code>repeated string values = 1 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return A list containing the values.
   */
  public com.google.protobuf.ProtocolStringList getValuesList() {
    return values_;
  }

  /**
   * Required. Represents the values of the partition keys (see {@link #getValuesList()}).
   *
   * @return The count of values.
   */
  public int getValuesCount() {
    return values_.size();
  }

  /**
   * Required. Represents the values of the partition keys (see {@link #getValuesList()}).
   *
   * @param index The index of the element to return.
   * @return The values at the given index.
   */
  public java.lang.String getValues(int index) {
    return values_.get(index);
  }

  /**
   * Required. Represents the values of the partition keys (see {@link #getValuesList()}).
   *
   * @param index The index of the value to return.
   * @return The bytes of the values at the given index.
   */
  public com.google.protobuf.ByteString getValuesBytes(int index) {
    return values_.getByteString(index);
  }

  public static final int CREATE_TIME_FIELD_NUMBER = 2;
  private com.google.protobuf.Timestamp createTime_;
  /**
   * Output only. The creation time of the partition.
   *
   * <code>.google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];</code>
   *
   * @return Whether the createTime field is set.
   */
  @java.lang.Override
  public boolean hasCreateTime() {
    return ((bitField0_ & 0x00000001) != 0);
  }

  /**
   * Output only. The creation time of the partition.
   *
   * @return The createTime (default instance if unset).
   */
  @java.lang.Override
  public com.google.protobuf.Timestamp getCreateTime() {
    return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_;
  }

  /** Output only. The creation time of the partition (message-or-builder view). */
  @java.lang.Override
  public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() {
    return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_;
  }

  public static final int STORAGE_DESCRIPTOR_FIELD_NUMBER = 3;
  private com.google.cloud.biglake.hive.v1beta.StorageDescriptor storageDescriptor_;
  /**
   * Optional. Contains information about the physical storage of the data in
   * the partition.
   *
   * <code>.google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return Whether the storageDescriptor field is set.
   */
  @java.lang.Override
  public boolean hasStorageDescriptor() {
    return ((bitField0_ & 0x00000002) != 0);
  }

  /**
   * Optional. Contains information about the physical storage of the data in
   * the partition.
   *
   * @return The storageDescriptor (default instance if unset).
   */
  @java.lang.Override
  public com.google.cloud.biglake.hive.v1beta.StorageDescriptor getStorageDescriptor() {
    return storageDescriptor_ == null
        ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance()
        : storageDescriptor_;
  }

  /** Optional. Physical-storage info for the partition (message-or-builder view). */
  @java.lang.Override
  public com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder
      getStorageDescriptorOrBuilder() {
    return storageDescriptor_ == null
        ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance()
        : storageDescriptor_;
  }

  public static final int PARAMETERS_FIELD_NUMBER = 4;

  // Default (empty-key/empty-value) map entry prototype for the parameters map field.
  private static final class ParametersDefaultEntryHolder {
    static final com.google.protobuf.MapEntry<java.lang.String, java.lang.String> defaultEntry =
        com.google.protobuf.MapEntry.<java.lang.String, java.lang.String>newDefaultInstance(
            com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto
                .internal_static_google_cloud_biglake_hive_v1beta_Partition_ParametersEntry_descriptor,
            com.google.protobuf.WireFormat.FieldType.STRING,
            "",
            com.google.protobuf.WireFormat.FieldType.STRING,
            "");
  }

  @SuppressWarnings("serial")
  private com.google.protobuf.MapField<java.lang.String, java.lang.String> parameters_;

  private com.google.protobuf.MapField<java.lang.String, java.lang.String>
      internalGetParameters() {
    if (parameters_ == null) {
      return com.google.protobuf.MapField.emptyMapField(ParametersDefaultEntryHolder.defaultEntry);
    }
    return parameters_;
  }

  public int getParametersCount() {
    return internalGetParameters().getMap().size();
  }
  /**
   * Optional. Additional parameters or metadata associated with the partition.
   * Maximum size 10 KiB.
   *
   * <code>map&lt;string, string&gt; parameters = 4 [(.google.api.field_behavior) = OPTIONAL];</code>
   */
  @java.lang.Override
  public boolean containsParameters(java.lang.String key) {
    if (key == null) {
      throw new NullPointerException("map key");
    }
    return internalGetParameters().getMap().containsKey(key);
  }

  /** Use {@link #getParametersMap()} instead. */
  @java.lang.Override
  @java.lang.Deprecated
  public java.util.Map<java.lang.String, java.lang.String> getParameters() {
    return getParametersMap();
  }

  /**
   * Optional. Additional parameters or metadata associated with the partition.
   * Maximum size 10 KiB.
   */
  @java.lang.Override
  public java.util.Map<java.lang.String, java.lang.String> getParametersMap() {
    return internalGetParameters().getMap();
  }

  /**
   * Optional. Returns the parameter for {@code key}, or {@code defaultValue} when absent.
   * Null keys are rejected; map values are never null.
   */
  @java.lang.Override
  public /* nullable */ java.lang.String getParametersOrDefault(
      java.lang.String key,
      /* nullable */
      java.lang.String defaultValue) {
    if (key == null) {
      throw new NullPointerException("map key");
    }
    java.util.Map<java.lang.String, java.lang.String> map = internalGetParameters().getMap();
    return map.containsKey(key) ? map.get(key) : defaultValue;
  }

  /**
   * Optional. Returns the parameter for {@code key}; throws IllegalArgumentException
   * when the key is absent.
   */
  @java.lang.Override
  public java.lang.String getParametersOrThrow(java.lang.String key) {
    if (key == null) {
      throw new NullPointerException("map key");
    }
    java.util.Map<java.lang.String, java.lang.String> map = internalGetParameters().getMap();
    if (!map.containsKey(key)) {
      throw new java.lang.IllegalArgumentException();
    }
    return map.get(key);
  }

  public static final int FIELDS_FIELD_NUMBER = 5;

  @SuppressWarnings("serial")
  private java.util.List<com.google.cloud.biglake.hive.v1beta.FieldSchema> fields_;
  /**
   * Optional. List of columns.
   *
   * <code>repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL];</code>
   */
  @java.lang.Override
  public java.util.List<com.google.cloud.biglake.hive.v1beta.FieldSchema> getFieldsList() {
    return fields_;
  }

  /** Optional. Read-only (message-or-builder) views of all columns. */
  @java.lang.Override
  public java.util.List<? extends com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder>
      getFieldsOrBuilderList() {
    return fields_;
  }

  /** Optional. Number of columns. */
  @java.lang.Override
  public int getFieldsCount() {
    return fields_.size();
  }

  /** Optional. The column at {@code index}. */
  @java.lang.Override
  public com.google.cloud.biglake.hive.v1beta.FieldSchema getFields(int index) {
    return fields_.get(index);
  }
  /** Optional. Read-only (message-or-builder) view of the column at {@code index}. */
  @java.lang.Override
  public com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder getFieldsOrBuilder(int index) {
    return fields_.get(index);
  }

  // Memoized proto3 initialization check: -1 unknown, 0 false, 1 true.
  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  /** Serializes all set fields in field-number order, then any unknown fields. */
  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    for (int i = 0; i < values_.size(); i++) {
      com.google.protobuf.GeneratedMessage.writeString(output, 1, values_.getRaw(i));
    }
    if (((bitField0_ & 0x00000001) != 0)) {
      output.writeMessage(2, getCreateTime());
    }
    if (((bitField0_ & 0x00000002) != 0)) {
      output.writeMessage(3, getStorageDescriptor());
    }
    com.google.protobuf.GeneratedMessage.serializeStringMapTo(
        output, internalGetParameters(), ParametersDefaultEntryHolder.defaultEntry, 4);
    for (int i = 0; i < fields_.size(); i++) {
      output.writeMessage(5, fields_.get(i));
    }
    getUnknownFields().writeTo(output);
  }

  /** Computes (and memoizes) the serialized byte size; mirrors writeTo exactly. */
  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    {
      int dataSize = 0;
      for (int i = 0; i < values_.size(); i++) {
        dataSize += computeStringSizeNoTag(values_.getRaw(i));
      }
      size += dataSize;
      size += 1 * getValuesList().size();
    }
    if (((bitField0_ & 0x00000001) != 0)) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getCreateTime());
    }
    if (((bitField0_ & 0x00000002) != 0)) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getStorageDescriptor());
    }
    for (java.util.Map.Entry<java.lang.String, java.lang.String> entry :
        internalGetParameters().getMap().entrySet()) {
      com.google.protobuf.MapEntry<java.lang.String, java.lang.String> parameters__ =
          ParametersDefaultEntryHolder.defaultEntry
              .newBuilderForType()
              .setKey(entry.getKey())
              .setValue(entry.getValue())
              .build();
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, parameters__);
    }
    for (int i = 0; i < fields_.size(); i++) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, fields_.get(i));
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.Partition)) {
      return super.equals(obj);
    }
    com.google.cloud.biglake.hive.v1beta.Partition other =
        (com.google.cloud.biglake.hive.v1beta.Partition) obj;

    if (!getValuesList().equals(other.getValuesList())) return false;
    if (hasCreateTime() != other.hasCreateTime()) return false;
    if (hasCreateTime()) {
      if (!getCreateTime().equals(other.getCreateTime())) return false;
    }
    if (hasStorageDescriptor() != other.hasStorageDescriptor()) return false;
    if (hasStorageDescriptor()) {
      if (!getStorageDescriptor().equals(other.getStorageDescriptor())) return false;
    }
    if (!internalGetParameters().equals(other.internalGetParameters())) return false;
    if (!getFieldsList().equals(other.getFieldsList())) return false;
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    if (getValuesCount() > 0) {
      hash = (37 * hash) + VALUES_FIELD_NUMBER;
      hash = (53 * hash) + getValuesList().hashCode();
    }
    if (hasCreateTime()) {
      hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER;
      hash = (53 * hash) + getCreateTime().hashCode();
    }
    if (hasStorageDescriptor()) {
      hash = (37 * hash) + STORAGE_DESCRIPTOR_FIELD_NUMBER;
      hash = (53 * hash) + getStorageDescriptor().hashCode();
    }
    if (!internalGetParameters().getMap().isEmpty()) {
      hash = (37 * hash) + PARAMETERS_FIELD_NUMBER;
      hash = (53 * hash) + internalGetParameters().hashCode();
    }
    if (getFieldsCount() > 0) {
      hash = (37 * hash) + FIELDS_FIELD_NUMBER;
      hash = (53 * hash) + getFieldsList().hashCode();
    }
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseFrom(java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.biglake.hive.v1beta.Partition parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(com.google.cloud.biglake.hive.v1beta.Partition prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    // The default instance has nothing to merge; skip the copy.
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
+   * Information about a Hive partition.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.Partition} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.Partition) + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_Partition_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetMutableParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_Partition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.Partition.class, + com.google.cloud.biglake.hive.v1beta.Partition.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.Partition.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCreateTimeFieldBuilder(); + internalGetStorageDescriptorFieldBuilder(); + internalGetFieldsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + storageDescriptor_ = null; + if (storageDescriptorBuilder_ != null) { + storageDescriptorBuilder_.dispose(); + storageDescriptorBuilder_ = null; + } + internalGetMutableParameters().clear(); + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + } else { + fields_ = null; + fieldsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_Partition_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.Partition getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.Partition build() { + com.google.cloud.biglake.hive.v1beta.Partition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.Partition buildPartial() { + com.google.cloud.biglake.hive.v1beta.Partition result = + new com.google.cloud.biglake.hive.v1beta.Partition(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.cloud.biglake.hive.v1beta.Partition 
result) { + if (fieldsBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.fields_ = fields_; + } else { + result.fields_ = fieldsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.Partition result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + values_.makeImmutable(); + result.values_ = values_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.storageDescriptor_ = + storageDescriptorBuilder_ == null + ? storageDescriptor_ + : storageDescriptorBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.parameters_ = internalGetParameters(); + result.parameters_.makeImmutable(); + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.Partition) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.Partition) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.Partition other) { + if (other == com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance()) return this; + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ |= 0x00000001; + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasStorageDescriptor()) { + mergeStorageDescriptor(other.getStorageDescriptor()); + } + 
internalGetMutableParameters().mergeFrom(other.internalGetParameters()); + bitField0_ |= 0x00000008; + if (fieldsBuilder_ == null) { + if (!other.fields_.isEmpty()) { + if (fields_.isEmpty()) { + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureFieldsIsMutable(); + fields_.addAll(other.fields_); + } + onChanged(); + } + } else { + if (!other.fields_.isEmpty()) { + if (fieldsBuilder_.isEmpty()) { + fieldsBuilder_.dispose(); + fieldsBuilder_ = null; + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000010); + fieldsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetFieldsFieldBuilder() + : null; + } else { + fieldsBuilder_.addAllMessages(other.fields_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureValuesIsMutable(); + values_.add(s); + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetStorageDescriptorFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.protobuf.MapEntry parameters__ = + input.readMessage( + ParametersDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + 
internalGetMutableParameters() + .getMutableMap() + .put(parameters__.getKey(), parameters__.getValue()); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + com.google.cloud.biglake.hive.v1beta.FieldSchema m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.FieldSchema.parser(), + extensionRegistry); + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(m); + } else { + fieldsBuilder_.addMessage(m); + } + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureValuesIsMutable() { + if (!values_.isModifiable()) { + values_ = new com.google.protobuf.LazyStringArrayList(values_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + values_.makeImmutable(); + return values_; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The values to set. + * @return This builder for chaining. + */ + public Builder setValues(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The values to add. + * @return This builder for chaining. + */ + public Builder addValues(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The values to add. + * @return This builder for chaining. + */ + public Builder addAllValues(java.lang.Iterable values) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearValues() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the values to add. + * @return This builder for chaining. + */ + public Builder addValuesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000002); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.cloud.biglake.hive.v1beta.StorageDescriptor storageDescriptor_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder> + storageDescriptorBuilder_; + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the storageDescriptor field is set. + */ + public boolean hasStorageDescriptor() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The storageDescriptor. + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor getStorageDescriptor() { + if (storageDescriptorBuilder_ == null) { + return storageDescriptor_ == null + ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } else { + return storageDescriptorBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setStorageDescriptor( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor value) { + if (storageDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + storageDescriptor_ = value; + } else { + storageDescriptorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setStorageDescriptor( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder builderForValue) { + if (storageDescriptorBuilder_ == null) { + storageDescriptor_ = builderForValue.build(); + } else { + storageDescriptorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeStorageDescriptor( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor value) { + if (storageDescriptorBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && storageDescriptor_ != null + && storageDescriptor_ + != com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance()) { + getStorageDescriptorBuilder().mergeFrom(value); + } else { + storageDescriptor_ = value; + } + } else { + storageDescriptorBuilder_.mergeFrom(value); + } + if (storageDescriptor_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearStorageDescriptor() { + bitField0_ = (bitField0_ & ~0x00000004); + storageDescriptor_ = null; + if (storageDescriptorBuilder_ != null) { + storageDescriptorBuilder_.dispose(); + storageDescriptorBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder + getStorageDescriptorBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetStorageDescriptorFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder + getStorageDescriptorOrBuilder() { + if (storageDescriptorBuilder_ != null) { + return storageDescriptorBuilder_.getMessageOrBuilder(); + } else { + return storageDescriptor_ == null + ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder> + internalGetStorageDescriptorFieldBuilder() { + if (storageDescriptorBuilder_ == null) { + storageDescriptorBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder>( + getStorageDescriptor(), getParentForChildren(), isClean()); + storageDescriptor_ = null; + } + return storageDescriptorBuilder_; + } + + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField + internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + private com.google.protobuf.MapField + internalGetMutableParameters() { + if (parameters_ == null) { + parameters_ = + com.google.protobuf.MapField.newMapField(ParametersDefaultEntryHolder.defaultEntry); + } + if (!parameters_.isMutable()) { + parameters_ = parameters_.copy(); + } + bitField0_ |= 0x00000008; + onChanged(); + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearParameters() { + bitField0_ = (bitField0_ & ~0x00000008); + internalGetMutableParameters().getMutableMap().clear(); + return this; + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableParameters().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableParameters() { + bitField0_ |= 0x00000008; + return internalGetMutableParameters().getMutableMap(); + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putParameters(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParameters().getMutableMap().put(key, value); + bitField0_ |= 0x00000008; + return this; + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllParameters(java.util.Map values) { + internalGetMutableParameters().getMutableMap().putAll(values); + bitField0_ |= 0x00000008; + return this; + } + + private java.util.List fields_ = + java.util.Collections.emptyList(); + + private void ensureFieldsIsMutable() { + if (!((bitField0_ & 0x00000010) != 0)) { + fields_ = + new java.util.ArrayList(fields_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.FieldSchema, + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder, + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder> + fieldsBuilder_; + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getFieldsList() { + if (fieldsBuilder_ == null) { + return java.util.Collections.unmodifiableList(fields_); + } else { + return fieldsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getFieldsCount() { + if (fieldsBuilder_ == null) { + return fields_.size(); + } else { + return fieldsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema getFields(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields(int index, com.google.cloud.biglake.hive.v1beta.FieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.set(index, value); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields( + int index, com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.set(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields(com.google.cloud.biglake.hive.v1beta.FieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(value); + onChanged(); + } else { + fieldsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields(int index, com.google.cloud.biglake.hive.v1beta.FieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(index, value); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + int index, com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllFields( + java.lang.Iterable values) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); + onChanged(); + } else { + fieldsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearFields() { + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + fieldsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeFields(int index) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.remove(index); + onChanged(); + } else { + fieldsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder getFieldsBuilder(int index) { + return internalGetFieldsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder getFieldsOrBuilder(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getFieldsOrBuilderList() { + if (fieldsBuilder_ != null) { + return fieldsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(fields_); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder addFieldsBuilder() { + return internalGetFieldsFieldBuilder() + .addBuilder(com.google.cloud.biglake.hive.v1beta.FieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder addFieldsBuilder(int index) { + return internalGetFieldsFieldBuilder() + .addBuilder(index, com.google.cloud.biglake.hive.v1beta.FieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getFieldsBuilderList() { + return internalGetFieldsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.FieldSchema, + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder, + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder> + internalGetFieldsFieldBuilder() { + if (fieldsBuilder_ == null) { + fieldsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.FieldSchema, + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder, + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder>( + fields_, ((bitField0_ & 0x00000010) != 0), getParentForChildren(), isClean()); + fields_ = null; + } + return fieldsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.Partition) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.Partition) + private static final com.google.cloud.biglake.hive.v1beta.Partition DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.Partition(); + } + + public static com.google.cloud.biglake.hive.v1beta.Partition getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Partition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch 
(com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.Partition getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/PartitionOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/PartitionOrBuilder.java new file mode 100644 index 000000000000..5d0e3974282b --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/PartitionOrBuilder.java @@ -0,0 +1,315 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface PartitionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.Partition) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + java.util.List getValuesList(); + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + int getValuesCount(); + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + java.lang.String getValues(int index); + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + com.google.protobuf.ByteString getValuesBytes(int index); + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the storageDescriptor field is set. + */ + boolean hasStorageDescriptor(); + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The storageDescriptor. + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptor getStorageDescriptor(); + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder getStorageDescriptorOrBuilder(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getParametersCount(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsParameters(java.lang.String key); + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParameters(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getParametersMap(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getParametersOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getFieldsList(); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.biglake.hive.v1beta.FieldSchema getFields(int index); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getFieldsCount(); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getFieldsOrBuilderList(); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder getFieldsOrBuilder(int index); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/PartitionValues.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/PartitionValues.java new file mode 100644 index 000000000000..99844f4b5166 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/PartitionValues.java @@ -0,0 +1,710 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Represents the values of a partition.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.PartitionValues} + */ +@com.google.protobuf.Generated +public final class PartitionValues extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.PartitionValues) + PartitionValuesOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PartitionValues"); + } + + // Use PartitionValues.newBuilder() to construct. + private PartitionValues(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PartitionValues() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_PartitionValues_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_PartitionValues_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.PartitionValues.class, + com.google.cloud.biglake.hive.v1beta.PartitionValues.Builder.class); + } + + public static final int VALUES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + return values_; + } + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < values_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, values_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < values_.size(); i++) { + dataSize += computeStringSizeNoTag(values_.getRaw(i)); + } + size += dataSize; + size += 1 * getValuesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.PartitionValues)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.PartitionValues other = + (com.google.cloud.biglake.hive.v1beta.PartitionValues) obj; + + if (!getValuesList().equals(other.getValuesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + 
if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.cloud.biglake.hive.v1beta.PartitionValues parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.biglake.hive.v1beta.PartitionValues prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Represents the values of a partition.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.PartitionValues} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.PartitionValues) + com.google.cloud.biglake.hive.v1beta.PartitionValuesOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_PartitionValues_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_PartitionValues_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.PartitionValues.class, + com.google.cloud.biglake.hive.v1beta.PartitionValues.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.PartitionValues.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_PartitionValues_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.PartitionValues getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.PartitionValues.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.PartitionValues build() { + 
com.google.cloud.biglake.hive.v1beta.PartitionValues result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.PartitionValues buildPartial() { + com.google.cloud.biglake.hive.v1beta.PartitionValues result = + new com.google.cloud.biglake.hive.v1beta.PartitionValues(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.PartitionValues result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + values_.makeImmutable(); + result.values_ = values_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.PartitionValues) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.PartitionValues) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.PartitionValues other) { + if (other == com.google.cloud.biglake.hive.v1beta.PartitionValues.getDefaultInstance()) + return this; + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ |= 0x00000001; + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch 
(tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureValuesIsMutable(); + values_.add(s); + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureValuesIsMutable() { + if (!values_.isModifiable()) { + values_ = new com.google.protobuf.LazyStringArrayList(values_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + values_.makeImmutable(); + return values_; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The values to set. + * @return This builder for chaining. + */ + public Builder setValues(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The values to add. + * @return This builder for chaining. + */ + public Builder addValues(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The values to add. + * @return This builder for chaining. + */ + public Builder addAllValues(java.lang.Iterable values) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearValues() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the values to add. + * @return This builder for chaining. + */ + public Builder addValuesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.PartitionValues) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.PartitionValues) + private static final com.google.cloud.biglake.hive.v1beta.PartitionValues DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.PartitionValues(); + } + + public static com.google.cloud.biglake.hive.v1beta.PartitionValues getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PartitionValues parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.PartitionValues getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/PartitionValuesOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/PartitionValuesOrBuilder.java new file mode 100644 index 000000000000..1a554f637a2c --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/PartitionValuesOrBuilder.java @@ -0,0 +1,86 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface PartitionValuesOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.PartitionValues) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + java.util.List getValuesList(); + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + int getValuesCount(); + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + java.lang.String getValues(int index); + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + com.google.protobuf.ByteString getValuesBytes(int index); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ProjectName.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ProjectName.java new file mode 100644 index 000000000000..6f43bea28907 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/ProjectName.java @@ -0,0 +1,168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class ProjectName implements ResourceName { + private static final PathTemplate PROJECT = + PathTemplate.createWithoutUrlEncoding("projects/{project}"); + private volatile Map fieldValuesMap; + private final String project; + + @Deprecated + protected ProjectName() { + project = null; + } + + private ProjectName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + } + + public String getProject() { + return project; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ProjectName of(String project) { + return newBuilder().setProject(project).build(); + } + + public static String format(String project) { + return newBuilder().setProject(project).build().toString(); + } + + public static ProjectName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT.validatedMatch( + formattedString, "ProjectName.parse: formattedString not in valid format"); + return of(matchMap.get("project")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (ProjectName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + 
fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT.instantiate("project", project); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + ProjectName that = ((ProjectName) o); + return Objects.equals(this.project, that.project); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + return h; + } + + /** Builder for projects/{project}. */ + public static class Builder { + private String project; + + protected Builder() {} + + public String getProject() { + return project; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + private Builder(ProjectName projectName) { + this.project = projectName.project; + } + + public ProjectName build() { + return new ProjectName(this); + } + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/SerdeInfo.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/SerdeInfo.java new file mode 100644 index 000000000000..1217a07cd3ab --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/SerdeInfo.java @@ -0,0 +1,2088 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Serialization and deserialization information.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.SerdeInfo} + */ +@com.google.protobuf.Generated +public final class SerdeInfo extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.SerdeInfo) + SerdeInfoOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SerdeInfo"); + } + + // Use SerdeInfo.newBuilder() to construct. + private SerdeInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SerdeInfo() { + name_ = ""; + serializationLib_ = ""; + description_ = ""; + serializerClass_ = ""; + deserializerClass_ = ""; + serdeType_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.SerdeInfo.class, + com.google.cloud.biglake.hive.v1beta.SerdeInfo.Builder.class); + } + + /** + * + * + *
+   * The serde types.
+   * 
+ * + * Protobuf enum {@code google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType} + */ + public enum SerdeType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * The serde type is not specified.
+     * 
+ * + * SERDE_TYPE_UNSPECIFIED = 0; + */ + SERDE_TYPE_UNSPECIFIED(0), + /** + * + * + *
+     * Hive.
+     * 
+ * + * HIVE = 1; + */ + HIVE(1), + /** + * + * + *
+     * Schema registry.
+     * 
+ * + * SCHEMA_REGISTRY = 2; + */ + SCHEMA_REGISTRY(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SerdeType"); + } + + /** + * + * + *
+     * The serde type is not specified.
+     * 
+ * + * SERDE_TYPE_UNSPECIFIED = 0; + */ + public static final int SERDE_TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * Hive.
+     * 
+ * + * HIVE = 1; + */ + public static final int HIVE_VALUE = 1; + + /** + * + * + *
+     * Schema registry.
+     * 
+ * + * SCHEMA_REGISTRY = 2; + */ + public static final int SCHEMA_REGISTRY_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SerdeType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static SerdeType forNumber(int value) { + switch (value) { + case 0: + return SERDE_TYPE_UNSPECIFIED; + case 1: + return HIVE; + case 2: + return SCHEMA_REGISTRY; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public SerdeType findValueByNumber(int number) { + return SerdeType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.SerdeInfo.getDescriptor().getEnumTypes().get(0); + } + + private static final SerdeType[] VALUES = values(); + + public static SerdeType 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private SerdeType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType) + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the SerDe. Table name by default. The maximum length is
+   * 128 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the SerDe. Table name by default. The maximum length is
+   * 128 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SERIALIZATION_LIB_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object serializationLib_ = ""; + + /** + * + * + *
+   * Required. The fully qualified Java class name of the serialization library.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string serialization_lib = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The serializationLib. + */ + @java.lang.Override + public java.lang.String getSerializationLib() { + java.lang.Object ref = serializationLib_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serializationLib_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The fully qualified Java class name of the serialization library.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string serialization_lib = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for serializationLib. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializationLibBytes() { + java.lang.Object ref = serializationLib_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serializationLib_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESCRIPTION_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object description_ = ""; + + /** + * + * + *
+   * Optional. Description of the serde. The maximum length is 4000 characters.
+   * 
+ * + * string description = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + @java.lang.Override + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Description of the serde. The maximum length is 4000 characters.
+   * 
+ * + * string description = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARAMETERS_FIELD_NUMBER = 4; + + private static final class ParametersDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_ParametersEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField(ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+   * Optional. Parameters of the serde. The maximum size is 10Kib.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+   * Optional. Parameters of the serde. The maximum size is 10Kib.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+   * Optional. Parameters of the serde. The maximum size is 10Kib.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * Optional. Parameters of the serde. The maximum size is 10Kib.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int SERIALIZER_CLASS_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object serializerClass_ = ""; + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the serializer. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string serializer_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The serializerClass. + */ + @java.lang.Override + public java.lang.String getSerializerClass() { + java.lang.Object ref = serializerClass_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serializerClass_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the serializer. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string serializer_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for serializerClass. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializerClassBytes() { + java.lang.Object ref = serializerClass_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serializerClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESERIALIZER_CLASS_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object deserializerClass_ = ""; + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the deserializer. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string deserializer_class = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The deserializerClass. + */ + @java.lang.Override + public java.lang.String getDeserializerClass() { + java.lang.Object ref = deserializerClass_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + deserializerClass_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the deserializer. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string deserializer_class = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for deserializerClass. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDeserializerClassBytes() { + java.lang.Object ref = deserializerClass_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + deserializerClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SERDE_TYPE_FIELD_NUMBER = 7; + private int serdeType_ = 0; + + /** + * + * + *
+   * Optional. The serde type.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType serde_type = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for serdeType. + */ + @java.lang.Override + public int getSerdeTypeValue() { + return serdeType_; + } + + /** + * + * + *
+   * Optional. The serde type.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType serde_type = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeType. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType getSerdeType() { + com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType result = + com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType.forNumber(serdeType_); + return result == null + ? com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(serializationLib_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, serializationLib_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(description_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, description_); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetParameters(), ParametersDefaultEntryHolder.defaultEntry, 4); + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(serializerClass_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, serializerClass_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(deserializerClass_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, deserializerClass_); + } + if (serdeType_ + != com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType.SERDE_TYPE_UNSPECIFIED + .getNumber()) { + 
output.writeEnum(7, serdeType_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(serializationLib_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, serializationLib_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(description_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, description_); + } + for (java.util.Map.Entry entry : + internalGetParameters().getMap().entrySet()) { + com.google.protobuf.MapEntry parameters__ = + ParametersDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, parameters__); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(serializerClass_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, serializerClass_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(deserializerClass_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, deserializerClass_); + } + if (serdeType_ + != com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType.SERDE_TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(7, serdeType_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.SerdeInfo)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.SerdeInfo other = + 
(com.google.cloud.biglake.hive.v1beta.SerdeInfo) obj; + + if (!getName().equals(other.getName())) return false; + if (!getSerializationLib().equals(other.getSerializationLib())) return false; + if (!getDescription().equals(other.getDescription())) return false; + if (!internalGetParameters().equals(other.internalGetParameters())) return false; + if (!getSerializerClass().equals(other.getSerializerClass())) return false; + if (!getDeserializerClass().equals(other.getDeserializerClass())) return false; + if (serdeType_ != other.serdeType_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + SERIALIZATION_LIB_FIELD_NUMBER; + hash = (53 * hash) + getSerializationLib().hashCode(); + hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER; + hash = (53 * hash) + getDescription().hashCode(); + if (!internalGetParameters().getMap().isEmpty()) { + hash = (37 * hash) + PARAMETERS_FIELD_NUMBER; + hash = (53 * hash) + internalGetParameters().hashCode(); + } + hash = (37 * hash) + SERIALIZER_CLASS_FIELD_NUMBER; + hash = (53 * hash) + getSerializerClass().hashCode(); + hash = (37 * hash) + DESERIALIZER_CLASS_FIELD_NUMBER; + hash = (53 * hash) + getDeserializerClass().hashCode(); + hash = (37 * hash) + SERDE_TYPE_FIELD_NUMBER; + hash = (53 * hash) + serdeType_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo parseFrom( + java.nio.ByteBuffer 
data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo 
parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.biglake.hive.v1beta.SerdeInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Serialization and deserialization information.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.SerdeInfo} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.SerdeInfo) + com.google.cloud.biglake.hive.v1beta.SerdeInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetMutableParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.SerdeInfo.class, + com.google.cloud.biglake.hive.v1beta.SerdeInfo.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.SerdeInfo.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + serializationLib_ = ""; + description_ = ""; + internalGetMutableParameters().clear(); + 
serializerClass_ = ""; + deserializerClass_ = ""; + serdeType_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_SerdeInfo_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.SerdeInfo getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.SerdeInfo.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.SerdeInfo build() { + com.google.cloud.biglake.hive.v1beta.SerdeInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.SerdeInfo buildPartial() { + com.google.cloud.biglake.hive.v1beta.SerdeInfo result = + new com.google.cloud.biglake.hive.v1beta.SerdeInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.SerdeInfo result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.serializationLib_ = serializationLib_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.description_ = description_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.parameters_ = internalGetParameters(); + result.parameters_.makeImmutable(); + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.serializerClass_ = serializerClass_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.deserializerClass_ = deserializerClass_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.serdeType_ = serdeType_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + 
if (other instanceof com.google.cloud.biglake.hive.v1beta.SerdeInfo) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.SerdeInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.SerdeInfo other) { + if (other == com.google.cloud.biglake.hive.v1beta.SerdeInfo.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getSerializationLib().isEmpty()) { + serializationLib_ = other.serializationLib_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getDescription().isEmpty()) { + description_ = other.description_; + bitField0_ |= 0x00000004; + onChanged(); + } + internalGetMutableParameters().mergeFrom(other.internalGetParameters()); + bitField0_ |= 0x00000008; + if (!other.getSerializerClass().isEmpty()) { + serializerClass_ = other.serializerClass_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (!other.getDeserializerClass().isEmpty()) { + deserializerClass_ = other.deserializerClass_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (other.serdeType_ != 0) { + setSerdeTypeValue(other.getSerdeTypeValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + serializationLib_ = input.readStringRequireUtf8(); + 
bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + description_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.protobuf.MapEntry parameters__ = + input.readMessage( + ParametersDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableParameters() + .getMutableMap() + .put(parameters__.getKey(), parameters__.getValue()); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + serializerClass_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + deserializerClass_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 56: + { + serdeType_ = input.readEnum(); + bitField0_ |= 0x00000040; + break; + } // case 56 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the SerDe. Table name by default. The maximum length is
+     * 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the SerDe. Table name by default. The maximum length is
+     * 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the SerDe. Table name by default. The maximum length is
+     * 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the SerDe. Table name by default. The maximum length is
+     * 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the SerDe. Table name by default. The maximum length is
+     * 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object serializationLib_ = ""; + + /** + * + * + *
+     * Required. The fully qualified Java class name of the serialization library.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string serialization_lib = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The serializationLib. + */ + public java.lang.String getSerializationLib() { + java.lang.Object ref = serializationLib_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serializationLib_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The fully qualified Java class name of the serialization library.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string serialization_lib = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for serializationLib. + */ + public com.google.protobuf.ByteString getSerializationLibBytes() { + java.lang.Object ref = serializationLib_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serializationLib_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The fully qualified Java class name of the serialization library.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string serialization_lib = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The serializationLib to set. + * @return This builder for chaining. + */ + public Builder setSerializationLib(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + serializationLib_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The fully qualified Java class name of the serialization library.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string serialization_lib = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearSerializationLib() { + serializationLib_ = getDefaultInstance().getSerializationLib(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The fully qualified Java class name of the serialization library.
+     * The maximum length is 4000 characters.
+     * 
+ * + * string serialization_lib = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for serializationLib to set. + * @return This builder for chaining. + */ + public Builder setSerializationLibBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + serializationLib_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object description_ = ""; + + /** + * + * + *
+     * Optional. Description of the serde. The maximum length is 4000 characters.
+     * 
+ * + * string description = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Description of the serde. The maximum length is 4000 characters.
+     * 
+ * + * string description = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Description of the serde. The maximum length is 4000 characters.
+     * 
+ * + * string description = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The description to set. + * @return This builder for chaining. + */ + public Builder setDescription(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + description_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Description of the serde. The maximum length is 4000 characters.
+     * 
+ * + * string description = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDescription() { + description_ = getDefaultInstance().getDescription(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Description of the serde. The maximum length is 4000 characters.
+     * 
+ * + * string description = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for description to set. + * @return This builder for chaining. + */ + public Builder setDescriptionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + description_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField + internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + private com.google.protobuf.MapField + internalGetMutableParameters() { + if (parameters_ == null) { + parameters_ = + com.google.protobuf.MapField.newMapField(ParametersDefaultEntryHolder.defaultEntry); + } + if (!parameters_.isMutable()) { + parameters_ = parameters_.copy(); + } + bitField0_ |= 0x00000008; + onChanged(); + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+     * Optional. Parameters of the serde. The maximum size is 10Kib.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+     * Optional. Parameters of the serde. The maximum size is 10Kib.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+     * Optional. Parameters of the serde. The maximum size is 10Kib.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+     * Optional. Parameters of the serde. The maximum size is 10Kib.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearParameters() { + bitField0_ = (bitField0_ & ~0x00000008); + internalGetMutableParameters().getMutableMap().clear(); + return this; + } + + /** + * + * + *
+     * Optional. Parameters of the serde. The maximum size is 10Kib.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableParameters().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableParameters() { + bitField0_ |= 0x00000008; + return internalGetMutableParameters().getMutableMap(); + } + + /** + * + * + *
+     * Optional. Parameters of the serde. The maximum size is 10Kib.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putParameters(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParameters().getMutableMap().put(key, value); + bitField0_ |= 0x00000008; + return this; + } + + /** + * + * + *
+     * Optional. Parameters of the serde. The maximum size is 10Kib.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllParameters(java.util.Map values) { + internalGetMutableParameters().getMutableMap().putAll(values); + bitField0_ |= 0x00000008; + return this; + } + + private java.lang.Object serializerClass_ = ""; + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the serializer. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string serializer_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The serializerClass. + */ + public java.lang.String getSerializerClass() { + java.lang.Object ref = serializerClass_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serializerClass_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the serializer. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string serializer_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for serializerClass. + */ + public com.google.protobuf.ByteString getSerializerClassBytes() { + java.lang.Object ref = serializerClass_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serializerClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the serializer. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string serializer_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The serializerClass to set. + * @return This builder for chaining. + */ + public Builder setSerializerClass(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + serializerClass_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the serializer. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string serializer_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearSerializerClass() { + serializerClass_ = getDefaultInstance().getSerializerClass(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the serializer. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string serializer_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for serializerClass to set. + * @return This builder for chaining. + */ + public Builder setSerializerClassBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + serializerClass_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private java.lang.Object deserializerClass_ = ""; + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the deserializer. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string deserializer_class = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The deserializerClass. + */ + public java.lang.String getDeserializerClass() { + java.lang.Object ref = deserializerClass_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + deserializerClass_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the deserializer. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string deserializer_class = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for deserializerClass. + */ + public com.google.protobuf.ByteString getDeserializerClassBytes() { + java.lang.Object ref = deserializerClass_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + deserializerClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the deserializer. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string deserializer_class = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The deserializerClass to set. + * @return This builder for chaining. + */ + public Builder setDeserializerClass(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + deserializerClass_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the deserializer. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string deserializer_class = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDeserializerClass() { + deserializerClass_ = getDefaultInstance().getDeserializerClass(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the deserializer. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string deserializer_class = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for deserializerClass to set. + * @return This builder for chaining. + */ + public Builder setDeserializerClassBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + deserializerClass_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private int serdeType_ = 0; + + /** + * + * + *
+     * Optional. The serde type.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType serde_type = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for serdeType. + */ + @java.lang.Override + public int getSerdeTypeValue() { + return serdeType_; + } + + /** + * + * + *
+     * Optional. The serde type.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType serde_type = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for serdeType to set. + * @return This builder for chaining. + */ + public Builder setSerdeTypeValue(int value) { + serdeType_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The serde type.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType serde_type = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeType. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType getSerdeType() { + com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType result = + com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType.forNumber(serdeType_); + return result == null + ? com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Optional. The serde type.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType serde_type = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The serdeType to set. + * @return This builder for chaining. + */ + public Builder setSerdeType(com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000040; + serdeType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The serde type.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType serde_type = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearSerdeType() { + bitField0_ = (bitField0_ & ~0x00000040); + serdeType_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.SerdeInfo) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.SerdeInfo) + private static final com.google.cloud.biglake.hive.v1beta.SerdeInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.SerdeInfo(); + } + + public static com.google.cloud.biglake.hive.v1beta.SerdeInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SerdeInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.SerdeInfo getDefaultInstanceForType() { + return 
DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/SerdeInfoOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/SerdeInfoOrBuilder.java new file mode 100644 index 000000000000..54fa8686b71a --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/SerdeInfoOrBuilder.java @@ -0,0 +1,264 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface SerdeInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.SerdeInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the SerDe. Table name by default. The maximum length is
+   * 128 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the SerDe. Table name by default. The maximum length is
+   * 128 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Required. The fully qualified Java class name of the serialization library.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string serialization_lib = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The serializationLib. + */ + java.lang.String getSerializationLib(); + + /** + * + * + *
+   * Required. The fully qualified Java class name of the serialization library.
+   * The maximum length is 4000 characters.
+   * 
+ * + * string serialization_lib = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for serializationLib. + */ + com.google.protobuf.ByteString getSerializationLibBytes(); + + /** + * + * + *
+   * Optional. Description of the serde. The maximum length is 4000 characters.
+   * 
+ * + * string description = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + java.lang.String getDescription(); + + /** + * + * + *
+   * Optional. Description of the serde. The maximum length is 4000 characters.
+   * 
+ * + * string description = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + com.google.protobuf.ByteString getDescriptionBytes(); + + /** + * + * + *
+   * Optional. Parameters of the serde. The maximum size is 10Kib.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getParametersCount(); + + /** + * + * + *
+   * Optional. Parameters of the serde. The maximum size is 10Kib.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsParameters(java.lang.String key); + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParameters(); + + /** + * + * + *
+   * Optional. Parameters of the serde. The maximum size is 10Kib.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getParametersMap(); + + /** + * + * + *
+   * Optional. Parameters of the serde. The maximum size is 10Kib.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
+   * Optional. Parameters of the serde. The maximum size is 10Kib.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getParametersOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the serializer. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string serializer_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The serializerClass. + */ + java.lang.String getSerializerClass(); + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the serializer. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string serializer_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for serializerClass. + */ + com.google.protobuf.ByteString getSerializerClassBytes(); + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the deserializer. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string deserializer_class = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The deserializerClass. + */ + java.lang.String getDeserializerClass(); + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the deserializer. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string deserializer_class = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for deserializerClass. + */ + com.google.protobuf.ByteString getDeserializerClassBytes(); + + /** + * + * + *
+   * Optional. The serde type.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType serde_type = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for serdeType. + */ + int getSerdeTypeValue(); + + /** + * + * + *
+   * Optional. The serde type.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType serde_type = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeType. + */ + com.google.cloud.biglake.hive.v1beta.SerdeInfo.SerdeType getSerdeType(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/StorageDescriptor.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/StorageDescriptor.java new file mode 100644 index 000000000000..5f661948ee3f --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/StorageDescriptor.java @@ -0,0 +1,8637 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Contains information about the physical storage of the table data.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.StorageDescriptor} + */ +@com.google.protobuf.Generated +public final class StorageDescriptor extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.StorageDescriptor) + StorageDescriptorOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "StorageDescriptor"); + } + + // Use StorageDescriptor.newBuilder() to construct. + private StorageDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private StorageDescriptor() { + columns_ = java.util.Collections.emptyList(); + locationUri_ = ""; + inputFormat_ = ""; + outputFormat_ = ""; + bucketCols_ = com.google.protobuf.LazyStringArrayList.emptyList(); + sortCols_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 10: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
com.google.cloud.biglake.hive.v1beta.StorageDescriptor.class, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder.class); + } + + public interface OrderOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.StorageDescriptor.Order) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. The column name. The maximum length is 767 characters.
+     * 
+ * + * string col = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The col. + */ + java.lang.String getCol(); + + /** + * + * + *
+     * Required. The column name. The maximum length is 767 characters.
+     * 
+ * + * string col = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for col. + */ + com.google.protobuf.ByteString getColBytes(); + + /** + * + * + *
+     * Required. Defines the sort order of the column. Ascending if 1,
+     * descending if 0.
+     * 
+ * + * int32 order = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The order. + */ + int getOrder(); + } + + /** + * + * + *
+   * Sort order of the stored data per column.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.StorageDescriptor.Order} + */ + public static final class Order extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.StorageDescriptor.Order) + OrderOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Order"); + } + + // Use Order.newBuilder() to construct. + private Order(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Order() { + col_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_Order_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_Order_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.class, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.Builder.class); + } + + public static final int COL_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object col_ = ""; + + /** + * + * + *
+     * Required. The column name. The maximum length is 767 characters.
+     * 
+ * + * string col = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The col. + */ + @java.lang.Override + public java.lang.String getCol() { + java.lang.Object ref = col_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + col_ = s; + return s; + } + } + + /** + * + * + *
+     * Required. The column name. The maximum length is 767 characters.
+     * 
+ * + * string col = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for col. + */ + @java.lang.Override + public com.google.protobuf.ByteString getColBytes() { + java.lang.Object ref = col_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + col_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ORDER_FIELD_NUMBER = 2; + private int order_ = 0; + + /** + * + * + *
+     * Required. Defines the sort order of the column. Ascending if 1,
+     * descending if 0.
+     * 
+ * + * int32 order = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The order. + */ + @java.lang.Override + public int getOrder() { + return order_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(col_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, col_); + } + if (order_ != 0) { + output.writeInt32(2, order_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(col_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, col_); + } + if (order_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, order_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order other = + (com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order) obj; + + if (!getCol().equals(other.getCol())) return false; + if (getOrder() != other.getOrder()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash 
= (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + COL_FIELD_NUMBER; + hash = (53 * hash) + getCol().hashCode(); + hash = (37 * hash) + ORDER_FIELD_NUMBER; + hash = (53 * hash) + getOrder(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return 
this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Sort order of the stored data per column.
+     * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.StorageDescriptor.Order} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.StorageDescriptor.Order) + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.OrderOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_Order_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_Order_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.class, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + col_ = ""; + order_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_Order_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order build() { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order buildPartial() { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order result = + new com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.col_ = col_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.order_ = order_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order other) { + if (other + == com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.getDefaultInstance()) + return this; + if (!other.getCol().isEmpty()) { + col_ = other.col_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getOrder() != 0) { + setOrder(other.getOrder()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if 
(extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + col_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + order_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object col_ = ""; + + /** + * + * + *
+       * Required. The column name. The maximum length is 767 characters.
+       * 
+ * + * string col = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The col. + */ + public java.lang.String getCol() { + java.lang.Object ref = col_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + col_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Required. The column name. The maximum length is 767 characters.
+       * 
+ * + * string col = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for col. + */ + public com.google.protobuf.ByteString getColBytes() { + java.lang.Object ref = col_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + col_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Required. The column name. The maximum length is 767 characters.
+       * 
+ * + * string col = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The col to set. + * @return This builder for chaining. + */ + public Builder setCol(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + col_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. The column name. The maximum length is 767 characters.
+       * 
+ * + * string col = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearCol() { + col_ = getDefaultInstance().getCol(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. The column name. The maximum length is 767 characters.
+       * 
+ * + * string col = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for col to set. + * @return This builder for chaining. + */ + public Builder setColBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + col_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int order_; + + /** + * + * + *
+       * Required. Defines the sort order of the column. Ascending if 1,
+       * descending if 0.
+       * 
+ * + * int32 order = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The order. + */ + @java.lang.Override + public int getOrder() { + return order_; + } + + /** + * + * + *
+       * Required. Defines the sort order of the column. Ascending if 1,
+       * descending if 0.
+       * 
+ * + * int32 order = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The order to set. + * @return This builder for chaining. + */ + public Builder setOrder(int value) { + + order_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Defines the sort order of the column. Ascending if 1,
+       * descending if 0.
+       * 
+ * + * int32 order = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearOrder() { + bitField0_ = (bitField0_ & ~0x00000002); + order_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.StorageDescriptor.Order) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.StorageDescriptor.Order) + private static final com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order(); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Order parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order + getDefaultInstanceForType() { 
+ return DEFAULT_INSTANCE; + } + } + + public interface SkewedInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. The column names that are skewed. The maximum length is 256
+     * characters per column name.
+     * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the skewedColNames. + */ + java.util.List getSkewedColNamesList(); + + /** + * + * + *
+     * Required. The column names that are skewed. The maximum length is 256
+     * characters per column name.
+     * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of skewedColNames. + */ + int getSkewedColNamesCount(); + + /** + * + * + *
+     * Required. The column names that are skewed. The maximum length is 256
+     * characters per column name.
+     * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The skewedColNames at the given index. + */ + java.lang.String getSkewedColNames(int index); + + /** + * + * + *
+     * Required. The column names that are skewed. The maximum length is 256
+     * characters per column name.
+     * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the skewedColNames at the given index. + */ + com.google.protobuf.ByteString getSkewedColNamesBytes(int index); + + /** + * + * + *
+     * Required. The skewed column values.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue> + getSkewedColValuesList(); + + /** + * + * + *
+     * Required. The skewed column values.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + getSkewedColValues(int index); + + /** + * + * + *
+     * Required. The skewed column values.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getSkewedColValuesCount(); + + /** + * + * + *
+     * Required. The skewed column values.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List< + ? extends + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValueOrBuilder> + getSkewedColValuesOrBuilderList(); + + /** + * + * + *
+     * Required. The skewed column values.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValueOrBuilder + getSkewedColValuesOrBuilder(int index); + + /** + * + * + *
+     * Required. The skewed key values locations.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation> + getSkewedKeyValuesLocationsList(); + + /** + * + * + *
+     * Required. The skewed key values locations.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + getSkewedKeyValuesLocations(int index); + + /** + * + * + *
+     * Required. The skewed key values locations.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getSkewedKeyValuesLocationsCount(); + + /** + * + * + *
+     * Required. The skewed key values locations.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List< + ? extends + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocationOrBuilder> + getSkewedKeyValuesLocationsOrBuilderList(); + + /** + * + * + *
+     * Required. The skewed key values locations.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocationOrBuilder + getSkewedKeyValuesLocationsOrBuilder(int index); + } + + /** + * + * + *
+   * Stores all the information about skewed table.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo} + */ + public static final class SkewedInfo extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo) + SkewedInfoOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SkewedInfo"); + } + + // Use SkewedInfo.newBuilder() to construct. + private SkewedInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SkewedInfo() { + skewedColNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + skewedColValues_ = java.util.Collections.emptyList(); + skewedKeyValuesLocations_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.class, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.Builder.class); + } + + public interface SkewedColumnValueOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + java.util.List getValuesList(); + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + int getValuesCount(); + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + java.lang.String getValues(int index); + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + com.google.protobuf.ByteString getValuesBytes(int index); + } + + /** + * + * + *
+     * The skewed column values.
+     * 
+ * + * Protobuf type {@code + * google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue} + */ + public static final class SkewedColumnValue extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue) + SkewedColumnValueOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SkewedColumnValue"); + } + + // Use SkewedColumnValue.newBuilder() to construct. + private SkewedColumnValue(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SkewedColumnValue() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedColumnValue_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedColumnValue_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .class, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .Builder.class); + } + + public static final int VALUES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + return values_; + } + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < values_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, values_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < values_.size(); i++) { + dataSize += computeStringSizeNoTag(values_.getRaw(i)); + } + size += dataSize; + size += 1 * getValuesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue other = + (com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue) + obj; + + if (!getValuesList().equals(other.getValuesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + 
return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return 
newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * The skewed column values.
+       * 
+ * + * Protobuf type {@code + * google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue) + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValueOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedColumnValue_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedColumnValue_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue.class, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue.Builder.class); + } + + // Construct using + // com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedColumnValue_descriptor; + } + + @java.lang.Override + 
public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + build() { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + buildPartial() { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + result = + new com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + values_.makeImmutable(); + result.values_ = values_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue) { + return mergeFrom( + (com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + other) { + if (other + == com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .getDefaultInstance()) return this; + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) 
{ + values_ = other.values_; + bitField0_ |= 0x00000001; + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureValuesIsMutable(); + values_.add(s); + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureValuesIsMutable() { + if (!values_.isModifiable()) { + values_ = new com.google.protobuf.LazyStringArrayList(values_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + values_.makeImmutable(); + return values_; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The values to set. + * @return This builder for chaining. + */ + public Builder setValues(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The values to add. + * @return This builder for chaining. + */ + public Builder addValues(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The values to add. + * @return This builder for chaining. + */ + public Builder addAllValues(java.lang.Iterable values) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearValues() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the values to add. + * @return This builder for chaining. + */ + public Builder addValuesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue) + private static final com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue(); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SkewedColumnValue parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface SkewedKeyValuesLocationOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + java.util.List getValuesList(); + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + int getValuesCount(); + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + java.lang.String getValues(int index); + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + com.google.protobuf.ByteString getValuesBytes(int index); + + /** + * + * + *
+       * Required. The location of the skewed column values. The maximum length
+       * is 4000 characters.
+       * 
+ * + * string location = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The location. + */ + java.lang.String getLocation(); + + /** + * + * + *
+       * Required. The location of the skewed column values. The maximum length
+       * is 4000 characters.
+       * 
+ * + * string location = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for location. + */ + com.google.protobuf.ByteString getLocationBytes(); + } + + /** + * + * + *
+     * The skewed key values and their corresponding location.
+     * 
+ * + * Protobuf type {@code + * google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation} + */ + public static final class SkewedKeyValuesLocation extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation) + SkewedKeyValuesLocationOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SkewedKeyValuesLocation"); + } + + // Use SkewedKeyValuesLocation.newBuilder() to construct. + private SkewedKeyValuesLocation(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SkewedKeyValuesLocation() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + location_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedKeyValuesLocation_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedKeyValuesLocation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.class, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.Builder.class); + } + + public static final int VALUES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList values_ = + 
com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + return values_; + } + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+       * Required. The skewed column values. The maximum length is 256
+       * characters per value.
+       * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + public static final int LOCATION_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object location_ = ""; + + /** + * + * + *
+       * Required. The location of the skewed column values. The maximum length
+       * is 4000 characters.
+       * 
+ * + * string location = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The location. + */ + @java.lang.Override + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } + } + + /** + * + * + *
+       * Required. The location of the skewed column values. The maximum length
+       * is 4000 characters.
+       * 
+ * + * string location = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for location. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < values_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, values_.getRaw(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, location_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < values_.size(); i++) { + dataSize += computeStringSizeNoTag(values_.getRaw(i)); + } + size += dataSize; + size += 1 * getValuesList().size(); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, location_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + 
.SkewedKeyValuesLocation)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + other = + (com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation) + obj; + + if (!getValuesList().equals(other.getValuesList())) return false; + if (!getLocation().equals(other.getLocation())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (37 * hash) + LOCATION_FIELD_NUMBER; + hash = (53 * hash) + getLocation().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseFrom( + com.google.protobuf.ByteString data, 
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, 
extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * The skewed key values and their corresponding location.
+       * 
+ * + * Protobuf type {@code + * google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation) + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocationOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedKeyValuesLocation_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedKeyValuesLocation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.class, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.Builder.class); + } + + // Construct using + // com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + location_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + 
.internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_SkewedKeyValuesLocation_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + build() { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + buildPartial() { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + result = + new com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + values_.makeImmutable(); + result.values_ = values_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.location_ = location_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation) { + return mergeFrom( + (com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + other) { + if (other + == com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.getDefaultInstance()) return this; + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ |= 0x00000001; + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + if (!other.getLocation().isEmpty()) { + location_ = other.location_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureValuesIsMutable(); + values_.add(s); + break; + } // case 10 + case 18: + { + location_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureValuesIsMutable() { + if (!values_.isModifiable()) { + values_ = 
new com.google.protobuf.LazyStringArrayList(values_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + values_.makeImmutable(); + return values_; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The values to set. + * @return This builder for chaining. + */ + public Builder setValues(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The values to add. + * @return This builder for chaining. + */ + public Builder addValues(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The values to add. + * @return This builder for chaining. + */ + public Builder addAllValues(java.lang.Iterable values) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearValues() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+         * Required. The skewed column values. The maximum length is 256
+         * characters per value.
+         * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the values to add. + * @return This builder for chaining. + */ + public Builder addValuesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object location_ = ""; + + /** + * + * + *
+         * Required. The location of the skewed column values. The maximum length
+         * is 4000 characters.
+         * 
+ * + * string location = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The location. + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+         * Required. The location of the skewed column values. The maximum length
+         * is 4000 characters.
+         * 
+ * + * string location = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for location. + */ + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+         * Required. The location of the skewed column values. The maximum length
+         * is 4000 characters.
+         * 
+ * + * string location = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The location to set. + * @return This builder for chaining. + */ + public Builder setLocation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + location_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Required. The location of the skewed column values. The maximum length
+         * is 4000 characters.
+         * 
+ * + * string location = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearLocation() { + location_ = getDefaultInstance().getLocation(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+         * Required. The location of the skewed column values. The maximum length
+         * is 4000 characters.
+         * 
+ * + * string location = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for location to set. + * @return This builder for chaining. + */ + public Builder setLocationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + location_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation) + private static final com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation(); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SkewedKeyValuesLocation parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public static final int SKEWED_COL_NAMES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList skewedColNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+     * Required. The column names that are skewed. The maximum length is 256
+     * characters per column name.
+     * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the skewedColNames. + */ + public com.google.protobuf.ProtocolStringList getSkewedColNamesList() { + return skewedColNames_; + } + + /** + * + * + *
+     * Required. The column names that are skewed. The maximum length is 256
+     * characters per column name.
+     * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of skewedColNames. + */ + public int getSkewedColNamesCount() { + return skewedColNames_.size(); + } + + /** + * + * + *
+     * Required. The column names that are skewed. The maximum length is 256
+     * characters per column name.
+     * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The skewedColNames at the given index. + */ + public java.lang.String getSkewedColNames(int index) { + return skewedColNames_.get(index); + } + + /** + * + * + *
+     * Required. The column names that are skewed. The maximum length is 256
+     * characters per column name.
+     * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the skewedColNames at the given index. + */ + public com.google.protobuf.ByteString getSkewedColNamesBytes(int index) { + return skewedColNames_.getByteString(index); + } + + public static final int SKEWED_COL_VALUES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue> + skewedColValues_; + + /** + * + * + *
+     * Required. The skewed column values.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue> + getSkewedColValuesList() { + return skewedColValues_; + } + + /** + * + * + *
+     * Required. The skewed column values.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValueOrBuilder> + getSkewedColValuesOrBuilderList() { + return skewedColValues_; + } + + /** + * + * + *
+     * Required. The skewed column values.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getSkewedColValuesCount() { + return skewedColValues_.size(); + } + + /** + * + * + *
+     * Required. The skewed column values.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + getSkewedColValues(int index) { + return skewedColValues_.get(index); + } + + /** + * + * + *
+     * Required. The skewed column values.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValueOrBuilder + getSkewedColValuesOrBuilder(int index) { + return skewedColValues_.get(index); + } + + public static final int SKEWED_KEY_VALUES_LOCATIONS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation> + skewedKeyValuesLocations_; + + /** + * + * + *
+     * Required. The skewed key values locations.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation> + getSkewedKeyValuesLocationsList() { + return skewedKeyValuesLocations_; + } + + /** + * + * + *
+     * Required. The skewed key values locations.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocationOrBuilder> + getSkewedKeyValuesLocationsOrBuilderList() { + return skewedKeyValuesLocations_; + } + + /** + * + * + *
+     * Required. The skewed key values locations.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getSkewedKeyValuesLocationsCount() { + return skewedKeyValuesLocations_.size(); + } + + /** + * + * + *
+     * Required. The skewed key values locations.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + getSkewedKeyValuesLocations(int index) { + return skewedKeyValuesLocations_.get(index); + } + + /** + * + * + *
+     * Required. The skewed key values locations.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocationOrBuilder + getSkewedKeyValuesLocationsOrBuilder(int index) { + return skewedKeyValuesLocations_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < skewedColNames_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, skewedColNames_.getRaw(i)); + } + for (int i = 0; i < skewedColValues_.size(); i++) { + output.writeMessage(2, skewedColValues_.get(i)); + } + for (int i = 0; i < skewedKeyValuesLocations_.size(); i++) { + output.writeMessage(3, skewedKeyValuesLocations_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < skewedColNames_.size(); i++) { + dataSize += computeStringSizeNoTag(skewedColNames_.getRaw(i)); + } + size += dataSize; + size += 1 * getSkewedColNamesList().size(); + } + for (int i = 0; i < skewedColValues_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(2, skewedColValues_.get(i)); + } + for (int i = 0; i < skewedKeyValuesLocations_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, skewedKeyValuesLocations_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo other = + (com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo) obj; + + if (!getSkewedColNamesList().equals(other.getSkewedColNamesList())) return false; + if (!getSkewedColValuesList().equals(other.getSkewedColValuesList())) return false; + if (!getSkewedKeyValuesLocationsList().equals(other.getSkewedKeyValuesLocationsList())) + return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getSkewedColNamesCount() > 0) { + hash = (37 * hash) + SKEWED_COL_NAMES_FIELD_NUMBER; + hash = (53 * hash) + getSkewedColNamesList().hashCode(); + } + if (getSkewedColValuesCount() > 0) { + hash = (37 * hash) + SKEWED_COL_VALUES_FIELD_NUMBER; + hash = (53 * hash) + getSkewedColValuesList().hashCode(); + } + if (getSkewedKeyValuesLocationsCount() > 0) { + hash = (37 * hash) + SKEWED_KEY_VALUES_LOCATIONS_FIELD_NUMBER; + hash = (53 * hash) + getSkewedKeyValuesLocationsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Stores all the information about skewed table.
+     * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo) + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.class, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.Builder.class); + } + + // Construct using + // com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + skewedColNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + if (skewedColValuesBuilder_ == null) { + skewedColValues_ = java.util.Collections.emptyList(); + } else { + skewedColValues_ = null; + skewedColValuesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (skewedKeyValuesLocationsBuilder_ == null) { + skewedKeyValuesLocations_ = java.util.Collections.emptyList(); + } else { + skewedKeyValuesLocations_ = null; + skewedKeyValuesLocationsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return 
this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_SkewedInfo_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo build() { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo buildPartial() { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo result = + new com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo result) { + if (skewedColValuesBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + skewedColValues_ = java.util.Collections.unmodifiableList(skewedColValues_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.skewedColValues_ = skewedColValues_; + } else { + result.skewedColValues_ = skewedColValuesBuilder_.build(); + } + if (skewedKeyValuesLocationsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + skewedKeyValuesLocations_ = + java.util.Collections.unmodifiableList(skewedKeyValuesLocations_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.skewedKeyValuesLocations_ = skewedKeyValuesLocations_; + } else { + 
result.skewedKeyValuesLocations_ = skewedKeyValuesLocationsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + skewedColNames_.makeImmutable(); + result.skewedColNames_ = skewedColNames_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo) { + return mergeFrom( + (com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo other) { + if (other + == com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .getDefaultInstance()) return this; + if (!other.skewedColNames_.isEmpty()) { + if (skewedColNames_.isEmpty()) { + skewedColNames_ = other.skewedColNames_; + bitField0_ |= 0x00000001; + } else { + ensureSkewedColNamesIsMutable(); + skewedColNames_.addAll(other.skewedColNames_); + } + onChanged(); + } + if (skewedColValuesBuilder_ == null) { + if (!other.skewedColValues_.isEmpty()) { + if (skewedColValues_.isEmpty()) { + skewedColValues_ = other.skewedColValues_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureSkewedColValuesIsMutable(); + skewedColValues_.addAll(other.skewedColValues_); + } + onChanged(); + } + } else { + if (!other.skewedColValues_.isEmpty()) { + if (skewedColValuesBuilder_.isEmpty()) { + skewedColValuesBuilder_.dispose(); + skewedColValuesBuilder_ = null; + skewedColValues_ = other.skewedColValues_; + bitField0_ = (bitField0_ & ~0x00000002); + skewedColValuesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetSkewedColValuesFieldBuilder() + : null; + } else { + skewedColValuesBuilder_.addAllMessages(other.skewedColValues_); + } + } + } + if (skewedKeyValuesLocationsBuilder_ == null) { + if (!other.skewedKeyValuesLocations_.isEmpty()) { + if (skewedKeyValuesLocations_.isEmpty()) { + skewedKeyValuesLocations_ = other.skewedKeyValuesLocations_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureSkewedKeyValuesLocationsIsMutable(); + skewedKeyValuesLocations_.addAll(other.skewedKeyValuesLocations_); + } + onChanged(); + } + } else { + if (!other.skewedKeyValuesLocations_.isEmpty()) { + if (skewedKeyValuesLocationsBuilder_.isEmpty()) { + skewedKeyValuesLocationsBuilder_.dispose(); + skewedKeyValuesLocationsBuilder_ = null; + skewedKeyValuesLocations_ = other.skewedKeyValuesLocations_; + bitField0_ = (bitField0_ & ~0x00000004); + skewedKeyValuesLocationsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetSkewedKeyValuesLocationsFieldBuilder() + : null; + } else { + skewedKeyValuesLocationsBuilder_.addAllMessages(other.skewedKeyValuesLocations_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureSkewedColNamesIsMutable(); + skewedColNames_.add(s); + break; + } // case 10 + case 18: + { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue + m = + input.readMessage( + 
com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue.parser(), + extensionRegistry); + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(m); + } else { + skewedColValuesBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.parser(), + extensionRegistry); + if (skewedKeyValuesLocationsBuilder_ == null) { + ensureSkewedKeyValuesLocationsIsMutable(); + skewedKeyValuesLocations_.add(m); + } else { + skewedKeyValuesLocationsBuilder_.addMessage(m); + } + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList skewedColNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureSkewedColNamesIsMutable() { + if (!skewedColNames_.isModifiable()) { + skewedColNames_ = new com.google.protobuf.LazyStringArrayList(skewedColNames_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+       * Required. The column names that are skewed. The maximum length is 256
+       * characters per column name.
+       * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return A list containing the skewedColNames. + */ + public com.google.protobuf.ProtocolStringList getSkewedColNamesList() { + skewedColNames_.makeImmutable(); + return skewedColNames_; + } + + /** + * + * + *
+       * Required. The column names that are skewed. The maximum length is 256
+       * characters per column name.
+       * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The count of skewedColNames. + */ + public int getSkewedColNamesCount() { + return skewedColNames_.size(); + } + + /** + * + * + *
+       * Required. The column names that are skewed. The maximum length is 256
+       * characters per column name.
+       * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param index The index of the element to return. + * @return The skewedColNames at the given index. + */ + public java.lang.String getSkewedColNames(int index) { + return skewedColNames_.get(index); + } + + /** + * + * + *
+       * Required. The column names that are skewed. The maximum length is 256
+       * characters per column name.
+       * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param index The index of the value to return. + * @return The bytes of the skewedColNames at the given index. + */ + public com.google.protobuf.ByteString getSkewedColNamesBytes(int index) { + return skewedColNames_.getByteString(index); + } + + /** + * + * + *
+       * Required. The column names that are skewed. The maximum length is 256
+       * characters per column name.
+       * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param index The index to set the value at. + * @param value The skewedColNames to set. + * @return This builder for chaining. + */ + public Builder setSkewedColNames(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColNamesIsMutable(); + skewedColNames_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. The column names that are skewed. The maximum length is 256
+       * characters per column name.
+       * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The skewedColNames to add. + * @return This builder for chaining. + */ + public Builder addSkewedColNames(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColNamesIsMutable(); + skewedColNames_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. The column names that are skewed. The maximum length is 256
+       * characters per column name.
+       * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param values The skewedColNames to add. + * @return This builder for chaining. + */ + public Builder addAllSkewedColNames(java.lang.Iterable values) { + ensureSkewedColNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, skewedColNames_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. The column names that are skewed. The maximum length is 256
+       * characters per column name.
+       * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return This builder for chaining. + */ + public Builder clearSkewedColNames() { + skewedColNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. The column names that are skewed. The maximum length is 256
+       * characters per column name.
+       * 
+ * + * repeated string skewed_col_names = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The bytes of the skewedColNames to add. + * @return This builder for chaining. + */ + public Builder addSkewedColNamesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureSkewedColNamesIsMutable(); + skewedColNames_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue> + skewedColValues_ = java.util.Collections.emptyList(); + + private void ensureSkewedColValuesIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + skewedColValues_ = + new java.util.ArrayList< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue>(skewedColValues_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValueOrBuilder> + skewedColValuesBuilder_; + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue> + getSkewedColValuesList() { + if (skewedColValuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(skewedColValues_); + } else { + return skewedColValuesBuilder_.getMessageList(); + } + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getSkewedColValuesCount() { + if (skewedColValuesBuilder_ == null) { + return skewedColValues_.size(); + } else { + return skewedColValuesBuilder_.getCount(); + } + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + getSkewedColValues(int index) { + if (skewedColValuesBuilder_ == null) { + return skewedColValues_.get(index); + } else { + return skewedColValuesBuilder_.getMessage(index); + } + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setSkewedColValues( + int index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + value) { + if (skewedColValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValuesIsMutable(); + skewedColValues_.set(index, value); + onChanged(); + } else { + skewedColValuesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setSkewedColValues( + int index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .Builder + builderForValue) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.set(index, builderForValue.build()); + onChanged(); + } else { + skewedColValuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSkewedColValues( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + value) { + if (skewedColValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(value); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSkewedColValues( + int index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + value) { + if (skewedColValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(index, value); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSkewedColValues( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .Builder + builderForValue) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(builderForValue.build()); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSkewedColValues( + int index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .Builder + builderForValue) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(index, builderForValue.build()); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllSkewedColValues( + java.lang.Iterable< + ? extends + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue> + values) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, skewedColValues_); + onChanged(); + } else { + skewedColValuesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearSkewedColValues() { + if (skewedColValuesBuilder_ == null) { + skewedColValues_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + skewedColValuesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeSkewedColValues(int index) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.remove(index); + onChanged(); + } else { + skewedColValuesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .Builder + getSkewedColValuesBuilder(int index) { + return internalGetSkewedColValuesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValueOrBuilder + getSkewedColValuesOrBuilder(int index) { + if (skewedColValuesBuilder_ == null) { + return skewedColValues_.get(index); + } else { + return skewedColValuesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValueOrBuilder> + getSkewedColValuesOrBuilderList() { + if (skewedColValuesBuilder_ != null) { + return skewedColValuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(skewedColValues_); + } + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .Builder + addSkewedColValuesBuilder() { + return internalGetSkewedColValuesFieldBuilder() + .addBuilder( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .getDefaultInstance()); + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .Builder + addSkewedColValuesBuilder(int index) { + return internalGetSkewedColValuesFieldBuilder() + .addBuilder( + index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .getDefaultInstance()); + } + + /** + * + * + *
+       * Required. The skewed column values.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue skewed_col_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .Builder> + getSkewedColValuesBuilderList() { + return internalGetSkewedColValuesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedColumnValue + .Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValueOrBuilder> + internalGetSkewedColValuesFieldBuilder() { + if (skewedColValuesBuilder_ == null) { + skewedColValuesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValue.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedColumnValueOrBuilder>( + skewedColValues_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + skewedColValues_ = null; + } + return skewedColValuesBuilder_; + } + + private java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation> + skewedKeyValuesLocations_ = java.util.Collections.emptyList(); + + private void ensureSkewedKeyValuesLocationsIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + skewedKeyValuesLocations_ = + new java.util.ArrayList< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation>(skewedKeyValuesLocations_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + 
.SkewedKeyValuesLocation, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocationOrBuilder> + skewedKeyValuesLocationsBuilder_; + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation> + getSkewedKeyValuesLocationsList() { + if (skewedKeyValuesLocationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(skewedKeyValuesLocations_); + } else { + return skewedKeyValuesLocationsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getSkewedKeyValuesLocationsCount() { + if (skewedKeyValuesLocationsBuilder_ == null) { + return skewedKeyValuesLocations_.size(); + } else { + return skewedKeyValuesLocationsBuilder_.getCount(); + } + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation + getSkewedKeyValuesLocations(int index) { + if (skewedKeyValuesLocationsBuilder_ == null) { + return skewedKeyValuesLocations_.get(index); + } else { + return skewedKeyValuesLocationsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setSkewedKeyValuesLocations( + int index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + value) { + if (skewedKeyValuesLocationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedKeyValuesLocationsIsMutable(); + skewedKeyValuesLocations_.set(index, value); + onChanged(); + } else { + skewedKeyValuesLocationsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setSkewedKeyValuesLocations( + int index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + .Builder + builderForValue) { + if (skewedKeyValuesLocationsBuilder_ == null) { + ensureSkewedKeyValuesLocationsIsMutable(); + skewedKeyValuesLocations_.set(index, builderForValue.build()); + onChanged(); + } else { + skewedKeyValuesLocationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSkewedKeyValuesLocations( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + value) { + if (skewedKeyValuesLocationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedKeyValuesLocationsIsMutable(); + skewedKeyValuesLocations_.add(value); + onChanged(); + } else { + skewedKeyValuesLocationsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSkewedKeyValuesLocations( + int index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + value) { + if (skewedKeyValuesLocationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedKeyValuesLocationsIsMutable(); + skewedKeyValuesLocations_.add(index, value); + onChanged(); + } else { + skewedKeyValuesLocationsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSkewedKeyValuesLocations( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + .Builder + builderForValue) { + if (skewedKeyValuesLocationsBuilder_ == null) { + ensureSkewedKeyValuesLocationsIsMutable(); + skewedKeyValuesLocations_.add(builderForValue.build()); + onChanged(); + } else { + skewedKeyValuesLocationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSkewedKeyValuesLocations( + int index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation + .Builder + builderForValue) { + if (skewedKeyValuesLocationsBuilder_ == null) { + ensureSkewedKeyValuesLocationsIsMutable(); + skewedKeyValuesLocations_.add(index, builderForValue.build()); + onChanged(); + } else { + skewedKeyValuesLocationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllSkewedKeyValuesLocations( + java.lang.Iterable< + ? extends + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation> + values) { + if (skewedKeyValuesLocationsBuilder_ == null) { + ensureSkewedKeyValuesLocationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, skewedKeyValuesLocations_); + onChanged(); + } else { + skewedKeyValuesLocationsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearSkewedKeyValuesLocations() { + if (skewedKeyValuesLocationsBuilder_ == null) { + skewedKeyValuesLocations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + skewedKeyValuesLocationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeSkewedKeyValuesLocations(int index) { + if (skewedKeyValuesLocationsBuilder_ == null) { + ensureSkewedKeyValuesLocationsIsMutable(); + skewedKeyValuesLocations_.remove(index); + onChanged(); + } else { + skewedKeyValuesLocationsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.Builder + getSkewedKeyValuesLocationsBuilder(int index) { + return internalGetSkewedKeyValuesLocationsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocationOrBuilder + getSkewedKeyValuesLocationsOrBuilder(int index) { + if (skewedKeyValuesLocationsBuilder_ == null) { + return skewedKeyValuesLocations_.get(index); + } else { + return skewedKeyValuesLocationsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocationOrBuilder> + getSkewedKeyValuesLocationsOrBuilderList() { + if (skewedKeyValuesLocationsBuilder_ != null) { + return skewedKeyValuesLocationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(skewedKeyValuesLocations_); + } + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.Builder + addSkewedKeyValuesLocationsBuilder() { + return internalGetSkewedKeyValuesLocationsFieldBuilder() + .addBuilder( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.getDefaultInstance()); + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.Builder + addSkewedKeyValuesLocationsBuilder(int index) { + return internalGetSkewedKeyValuesLocationsFieldBuilder() + .addBuilder( + index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.getDefaultInstance()); + } + + /** + * + * + *
+       * Required. The skewed key values locations.
+       * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.SkewedKeyValuesLocation skewed_key_values_locations = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.Builder> + getSkewedKeyValuesLocationsBuilderList() { + return internalGetSkewedKeyValuesLocationsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocationOrBuilder> + internalGetSkewedKeyValuesLocationsFieldBuilder() { + if (skewedKeyValuesLocationsBuilder_ == null) { + skewedKeyValuesLocationsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocation.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .SkewedKeyValuesLocationOrBuilder>( + skewedKeyValuesLocations_, + ((bitField0_ & 0x00000004) != 0), + getParentForChildren(), + isClean()); + skewedKeyValuesLocations_ = null; + } + return skewedKeyValuesLocationsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo) + private static final com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo(); + } + + public static 
com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SkewedInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int COLUMNS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List columns_; + + /** + * + * + *
+   * Required. Specifies the columns of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List getColumnsList() { + return columns_; + } + + /** + * + * + *
+   * Required. Specifies the columns of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getColumnsOrBuilderList() { + return columns_; + } + + /** + * + * + *
+   * Required. Specifies the columns of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getColumnsCount() { + return columns_.size(); + } + + /** + * + * + *
+   * Required. Specifies the columns of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.FieldSchema getColumns(int index) { + return columns_.get(index); + } + + /** + * + * + *
+   * Required. Specifies the columns of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder getColumnsOrBuilder(int index) { + return columns_.get(index); + } + + public static final int LOCATION_URI_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object locationUri_ = ""; + + /** + * + * + *
+   * Optional. The Cloud storage uri where the table is located.
+   * Defaults to `<database_location_uri>/<table_name>`. The maximum length is
+   * 4000 characters.
+   * 
+ * + * string location_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + @java.lang.Override + public java.lang.String getLocationUri() { + java.lang.Object ref = locationUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationUri_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The Cloud storage uri where the table is located.
+   * Defaults to `<database_location_uri>/<table_name>`. The maximum length is
+   * 4000 characters.
+   * 
+ * + * string location_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationUriBytes() { + java.lang.Object ref = locationUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INPUT_FORMAT_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object inputFormat_ = ""; + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the input format. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string input_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The inputFormat. + */ + @java.lang.Override + public java.lang.String getInputFormat() { + java.lang.Object ref = inputFormat_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + inputFormat_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the input format. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string input_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for inputFormat. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInputFormatBytes() { + java.lang.Object ref = inputFormat_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + inputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OUTPUT_FORMAT_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object outputFormat_ = ""; + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the output format. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + @java.lang.Override + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + outputFormat_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the output format. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COMPRESSED_FIELD_NUMBER = 5; + private boolean compressed_ = false; + + /** + * + * + *
+   * Optional. Whether the table is compressed.
+   * 
+ * + * optional bool compressed = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the compressed field is set. + */ + @java.lang.Override + public boolean hasCompressed() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. Whether the table is compressed.
+   * 
+ * + * optional bool compressed = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The compressed. + */ + @java.lang.Override + public boolean getCompressed() { + return compressed_; + } + + public static final int NUM_BUCKETS_FIELD_NUMBER = 6; + private int numBuckets_ = 0; + + /** + * + * + *
+   * Optional. The number of buckets in the table.
+   * 
+ * + * optional int32 num_buckets = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the numBuckets field is set. + */ + @java.lang.Override + public boolean hasNumBuckets() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. The number of buckets in the table.
+   * 
+ * + * optional int32 num_buckets = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The numBuckets. + */ + @java.lang.Override + public int getNumBuckets() { + return numBuckets_; + } + + public static final int SERDE_INFO_FIELD_NUMBER = 7; + private com.google.cloud.biglake.hive.v1beta.SerdeInfo serdeInfo_; + + /** + * + * + *
+   * Optional. Serialization and deserialization information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the serdeInfo field is set. + */ + @java.lang.Override + public boolean hasSerdeInfo() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Optional. Serialization and deserialization information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeInfo. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.SerdeInfo getSerdeInfo() { + return serdeInfo_ == null + ? com.google.cloud.biglake.hive.v1beta.SerdeInfo.getDefaultInstance() + : serdeInfo_; + } + + /** + * + * + *
+   * Optional. Serialization and deserialization information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.SerdeInfoOrBuilder getSerdeInfoOrBuilder() { + return serdeInfo_ == null + ? com.google.cloud.biglake.hive.v1beta.SerdeInfo.getDefaultInstance() + : serdeInfo_; + } + + public static final int BUCKET_COLS_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList bucketCols_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+   * Optional. Reducer grouping columns and clustering columns and bucketing
+   * columns
+   * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the bucketCols. + */ + public com.google.protobuf.ProtocolStringList getBucketColsList() { + return bucketCols_; + } + + /** + * + * + *
+   * Optional. Reducer grouping columns and clustering columns and bucketing
+   * columns
+   * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of bucketCols. + */ + public int getBucketColsCount() { + return bucketCols_.size(); + } + + /** + * + * + *
+   * Optional. Reducer grouping columns and clustering columns and bucketing
+   * columns
+   * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The bucketCols at the given index. + */ + public java.lang.String getBucketCols(int index) { + return bucketCols_.get(index); + } + + /** + * + * + *
+   * Optional. Reducer grouping columns and clustering columns and bucketing
+   * columns
+   * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the bucketCols at the given index. + */ + public com.google.protobuf.ByteString getBucketColsBytes(int index) { + return bucketCols_.getByteString(index); + } + + public static final int SORT_COLS_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private java.util.List sortCols_; + + /** + * + * + *
+   * Optional. Sort order of the data in each bucket
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getSortColsList() { + return sortCols_; + } + + /** + * + * + *
+   * Optional. Sort order of the data in each bucket
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.biglake.hive.v1beta.StorageDescriptor.OrderOrBuilder> + getSortColsOrBuilderList() { + return sortCols_; + } + + /** + * + * + *
+   * Optional. Sort order of the data in each bucket
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getSortColsCount() { + return sortCols_.size(); + } + + /** + * + * + *
+   * Optional. Sort order of the data in each bucket
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order getSortCols(int index) { + return sortCols_.get(index); + } + + /** + * + * + *
+   * Optional. Sort order of the data in each bucket
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.OrderOrBuilder getSortColsOrBuilder( + int index) { + return sortCols_.get(index); + } + + public static final int PARAMETERS_FIELD_NUMBER = 10; + + private static final class ParametersDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_ParametersEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField(ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+   * Optional. Key-value pairs for the storage descriptor. The maximum size is
+   * 10Kib.
+   * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+   * Optional. Key-value pairs for the storage descriptor. The maximum size is
+   * 10Kib.
+   * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+   * Optional. Key-value pairs for the storage descriptor. The maximum size is
+   * 10Kib.
+   * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * Optional. Key-value pairs for the storage descriptor. The maximum size is
+   * 10Kib.
+   * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int SKEWED_INFO_FIELD_NUMBER = 11; + private com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewedInfo_; + + /** + * + * + *
+   * Optional. Table data skew information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the skewedInfo field is set. + */ + @java.lang.Override + public boolean hasSkewedInfo() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Optional. Table data skew information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The skewedInfo. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo getSkewedInfo() { + return skewedInfo_ == null + ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.getDefaultInstance() + : skewedInfo_; + } + + /** + * + * + *
+   * Optional. Table data skew information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfoOrBuilder + getSkewedInfoOrBuilder() { + return skewedInfo_ == null + ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.getDefaultInstance() + : skewedInfo_; + } + + public static final int STORED_AS_SUB_DIRS_FIELD_NUMBER = 12; + private boolean storedAsSubDirs_ = false; + + /** + * + * + *
+   * Optional. Whether the table is stored as sub directories.
+   * 
+ * + * optional bool stored_as_sub_dirs = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the storedAsSubDirs field is set. + */ + @java.lang.Override + public boolean hasStoredAsSubDirs() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Optional. Whether the table is stored as sub directories.
+   * 
+ * + * optional bool stored_as_sub_dirs = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storedAsSubDirs. + */ + @java.lang.Override + public boolean getStoredAsSubDirs() { + return storedAsSubDirs_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < columns_.size(); i++) { + output.writeMessage(1, columns_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(locationUri_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, locationUri_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(inputFormat_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, inputFormat_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(outputFormat_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, outputFormat_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeBool(5, compressed_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt32(6, numBuckets_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(7, getSerdeInfo()); + } + for (int i = 0; i < bucketCols_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 8, bucketCols_.getRaw(i)); + } + for (int i = 0; i < sortCols_.size(); i++) { + output.writeMessage(9, sortCols_.get(i)); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetParameters(), ParametersDefaultEntryHolder.defaultEntry, 10); + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(11, getSkewedInfo()); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeBool(12, storedAsSubDirs_); + 
} + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < columns_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, columns_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(locationUri_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, locationUri_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(inputFormat_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, inputFormat_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(outputFormat_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, outputFormat_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, compressed_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(6, numBuckets_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getSerdeInfo()); + } + { + int dataSize = 0; + for (int i = 0; i < bucketCols_.size(); i++) { + dataSize += computeStringSizeNoTag(bucketCols_.getRaw(i)); + } + size += dataSize; + size += 1 * getBucketColsList().size(); + } + for (int i = 0; i < sortCols_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, sortCols_.get(i)); + } + for (java.util.Map.Entry entry : + internalGetParameters().getMap().entrySet()) { + com.google.protobuf.MapEntry parameters__ = + ParametersDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, parameters__); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(11, getSkewedInfo()); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(12, storedAsSubDirs_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.StorageDescriptor)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.StorageDescriptor other = + (com.google.cloud.biglake.hive.v1beta.StorageDescriptor) obj; + + if (!getColumnsList().equals(other.getColumnsList())) return false; + if (!getLocationUri().equals(other.getLocationUri())) return false; + if (!getInputFormat().equals(other.getInputFormat())) return false; + if (!getOutputFormat().equals(other.getOutputFormat())) return false; + if (hasCompressed() != other.hasCompressed()) return false; + if (hasCompressed()) { + if (getCompressed() != other.getCompressed()) return false; + } + if (hasNumBuckets() != other.hasNumBuckets()) return false; + if (hasNumBuckets()) { + if (getNumBuckets() != other.getNumBuckets()) return false; + } + if (hasSerdeInfo() != other.hasSerdeInfo()) return false; + if (hasSerdeInfo()) { + if (!getSerdeInfo().equals(other.getSerdeInfo())) return false; + } + if (!getBucketColsList().equals(other.getBucketColsList())) return false; + if (!getSortColsList().equals(other.getSortColsList())) return false; + if (!internalGetParameters().equals(other.internalGetParameters())) return false; + if (hasSkewedInfo() != other.hasSkewedInfo()) return false; + if (hasSkewedInfo()) { + if (!getSkewedInfo().equals(other.getSkewedInfo())) return false; + } + if (hasStoredAsSubDirs() != other.hasStoredAsSubDirs()) return false; + if (hasStoredAsSubDirs()) { + if (getStoredAsSubDirs() != other.getStoredAsSubDirs()) return false; + } + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getColumnsCount() > 0) { + hash = (37 * hash) + COLUMNS_FIELD_NUMBER; + hash = (53 * hash) + getColumnsList().hashCode(); + } + hash = (37 * hash) + LOCATION_URI_FIELD_NUMBER; + hash = (53 * hash) + getLocationUri().hashCode(); + hash = (37 * hash) + INPUT_FORMAT_FIELD_NUMBER; + hash = (53 * hash) + getInputFormat().hashCode(); + hash = (37 * hash) + OUTPUT_FORMAT_FIELD_NUMBER; + hash = (53 * hash) + getOutputFormat().hashCode(); + if (hasCompressed()) { + hash = (37 * hash) + COMPRESSED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getCompressed()); + } + if (hasNumBuckets()) { + hash = (37 * hash) + NUM_BUCKETS_FIELD_NUMBER; + hash = (53 * hash) + getNumBuckets(); + } + if (hasSerdeInfo()) { + hash = (37 * hash) + SERDE_INFO_FIELD_NUMBER; + hash = (53 * hash) + getSerdeInfo().hashCode(); + } + if (getBucketColsCount() > 0) { + hash = (37 * hash) + BUCKET_COLS_FIELD_NUMBER; + hash = (53 * hash) + getBucketColsList().hashCode(); + } + if (getSortColsCount() > 0) { + hash = (37 * hash) + SORT_COLS_FIELD_NUMBER; + hash = (53 * hash) + getSortColsList().hashCode(); + } + if (!internalGetParameters().getMap().isEmpty()) { + hash = (37 * hash) + PARAMETERS_FIELD_NUMBER; + hash = (53 * hash) + internalGetParameters().hashCode(); + } + if (hasSkewedInfo()) { + hash = (37 * hash) + SKEWED_INFO_FIELD_NUMBER; + hash = (53 * hash) + getSkewedInfo().hashCode(); + } + if (hasStoredAsSubDirs()) { + hash = (37 * hash) + STORED_AS_SUB_DIRS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getStoredAsSubDirs()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, 
extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Contains information about the physical storage of the table data.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.StorageDescriptor} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.StorageDescriptor) + com.google.cloud.biglake.hive.v1beta.StorageDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 10: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 10: + return internalGetMutableParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.class, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.StorageDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { 
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetColumnsFieldBuilder(); + internalGetSerdeInfoFieldBuilder(); + internalGetSortColsFieldBuilder(); + internalGetSkewedInfoFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (columnsBuilder_ == null) { + columns_ = java.util.Collections.emptyList(); + } else { + columns_ = null; + columnsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + locationUri_ = ""; + inputFormat_ = ""; + outputFormat_ = ""; + compressed_ = false; + numBuckets_ = 0; + serdeInfo_ = null; + if (serdeInfoBuilder_ != null) { + serdeInfoBuilder_.dispose(); + serdeInfoBuilder_ = null; + } + bucketCols_ = com.google.protobuf.LazyStringArrayList.emptyList(); + if (sortColsBuilder_ == null) { + sortCols_ = java.util.Collections.emptyList(); + } else { + sortCols_ = null; + sortColsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + internalGetMutableParameters().clear(); + skewedInfo_ = null; + if (skewedInfoBuilder_ != null) { + skewedInfoBuilder_.dispose(); + skewedInfoBuilder_ = null; + } + storedAsSubDirs_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_StorageDescriptor_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor build() { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public 
com.google.cloud.biglake.hive.v1beta.StorageDescriptor buildPartial() { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor result = + new com.google.cloud.biglake.hive.v1beta.StorageDescriptor(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor result) { + if (columnsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + columns_ = java.util.Collections.unmodifiableList(columns_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.columns_ = columns_; + } else { + result.columns_ = columnsBuilder_.build(); + } + if (sortColsBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0)) { + sortCols_ = java.util.Collections.unmodifiableList(sortCols_); + bitField0_ = (bitField0_ & ~0x00000100); + } + result.sortCols_ = sortCols_; + } else { + result.sortCols_ = sortColsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.StorageDescriptor result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.locationUri_ = locationUri_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.inputFormat_ = inputFormat_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.outputFormat_ = outputFormat_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.compressed_ = compressed_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.numBuckets_ = numBuckets_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.serdeInfo_ = serdeInfoBuilder_ == null ? 
serdeInfo_ : serdeInfoBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + bucketCols_.makeImmutable(); + result.bucketCols_ = bucketCols_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.parameters_ = internalGetParameters(); + result.parameters_.makeImmutable(); + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.skewedInfo_ = skewedInfoBuilder_ == null ? skewedInfo_ : skewedInfoBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.storedAsSubDirs_ = storedAsSubDirs_; + to_bitField0_ |= 0x00000010; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.StorageDescriptor) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.StorageDescriptor) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.StorageDescriptor other) { + if (other == com.google.cloud.biglake.hive.v1beta.StorageDescriptor.getDefaultInstance()) + return this; + if (columnsBuilder_ == null) { + if (!other.columns_.isEmpty()) { + if (columns_.isEmpty()) { + columns_ = other.columns_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureColumnsIsMutable(); + columns_.addAll(other.columns_); + } + onChanged(); + } + } else { + if (!other.columns_.isEmpty()) { + if (columnsBuilder_.isEmpty()) { + columnsBuilder_.dispose(); + columnsBuilder_ = null; + columns_ = other.columns_; + bitField0_ = (bitField0_ & ~0x00000001); + columnsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetColumnsFieldBuilder() + : null; + } else { + columnsBuilder_.addAllMessages(other.columns_); + } + } + } + if (!other.getLocationUri().isEmpty()) { + locationUri_ = other.locationUri_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getInputFormat().isEmpty()) { + inputFormat_ = other.inputFormat_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getOutputFormat().isEmpty()) { + outputFormat_ = other.outputFormat_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.hasCompressed()) { + setCompressed(other.getCompressed()); + } + if (other.hasNumBuckets()) { + setNumBuckets(other.getNumBuckets()); + } + if (other.hasSerdeInfo()) { + mergeSerdeInfo(other.getSerdeInfo()); + } + if (!other.bucketCols_.isEmpty()) { + if (bucketCols_.isEmpty()) { + bucketCols_ = other.bucketCols_; + bitField0_ |= 0x00000080; + } else { + ensureBucketColsIsMutable(); + bucketCols_.addAll(other.bucketCols_); + } + onChanged(); + } + if (sortColsBuilder_ == null) { + if (!other.sortCols_.isEmpty()) { + if (sortCols_.isEmpty()) { + sortCols_ = other.sortCols_; + bitField0_ = (bitField0_ & ~0x00000100); + } else { + ensureSortColsIsMutable(); + sortCols_.addAll(other.sortCols_); + } + onChanged(); + } + } else { + if (!other.sortCols_.isEmpty()) { + if (sortColsBuilder_.isEmpty()) { + sortColsBuilder_.dispose(); + sortColsBuilder_ = null; + sortCols_ = other.sortCols_; + bitField0_ = (bitField0_ & ~0x00000100); + sortColsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetSortColsFieldBuilder() + : null; + } else { + sortColsBuilder_.addAllMessages(other.sortCols_); + } + } + } + internalGetMutableParameters().mergeFrom(other.internalGetParameters()); + bitField0_ |= 0x00000200; + if (other.hasSkewedInfo()) { + mergeSkewedInfo(other.getSkewedInfo()); + } + if (other.hasStoredAsSubDirs()) { + setStoredAsSubDirs(other.getStoredAsSubDirs()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.biglake.hive.v1beta.FieldSchema m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.FieldSchema.parser(), + extensionRegistry); + if (columnsBuilder_ == null) { + ensureColumnsIsMutable(); + columns_.add(m); + } else { + columnsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + locationUri_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + inputFormat_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + outputFormat_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 40: + { + compressed_ = input.readBool(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 48: + { + numBuckets_ = input.readInt32(); + bitField0_ |= 0x00000020; + break; + } // case 48 + case 58: + { + input.readMessage( + internalGetSerdeInfoFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 
58 + case 66: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureBucketColsIsMutable(); + bucketCols_.add(s); + break; + } // case 66 + case 74: + { + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order m = + input.readMessage( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.parser(), + extensionRegistry); + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.add(m); + } else { + sortColsBuilder_.addMessage(m); + } + break; + } // case 74 + case 82: + { + com.google.protobuf.MapEntry parameters__ = + input.readMessage( + ParametersDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableParameters() + .getMutableMap() + .put(parameters__.getKey(), parameters__.getValue()); + bitField0_ |= 0x00000200; + break; + } // case 82 + case 90: + { + input.readMessage( + internalGetSkewedInfoFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000400; + break; + } // case 90 + case 96: + { + storedAsSubDirs_ = input.readBool(); + bitField0_ |= 0x00000800; + break; + } // case 96 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List columns_ = + java.util.Collections.emptyList(); + + private void ensureColumnsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + columns_ = + new java.util.ArrayList(columns_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.FieldSchema, + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder, + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder> + columnsBuilder_; + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List getColumnsList() { + if (columnsBuilder_ == null) { + return java.util.Collections.unmodifiableList(columns_); + } else { + return columnsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getColumnsCount() { + if (columnsBuilder_ == null) { + return columns_.size(); + } else { + return columnsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema getColumns(int index) { + if (columnsBuilder_ == null) { + return columns_.get(index); + } else { + return columnsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setColumns(int index, com.google.cloud.biglake.hive.v1beta.FieldSchema value) { + if (columnsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnsIsMutable(); + columns_.set(index, value); + onChanged(); + } else { + columnsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setColumns( + int index, com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder builderForValue) { + if (columnsBuilder_ == null) { + ensureColumnsIsMutable(); + columns_.set(index, builderForValue.build()); + onChanged(); + } else { + columnsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addColumns(com.google.cloud.biglake.hive.v1beta.FieldSchema value) { + if (columnsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnsIsMutable(); + columns_.add(value); + onChanged(); + } else { + columnsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addColumns(int index, com.google.cloud.biglake.hive.v1beta.FieldSchema value) { + if (columnsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnsIsMutable(); + columns_.add(index, value); + onChanged(); + } else { + columnsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addColumns( + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder builderForValue) { + if (columnsBuilder_ == null) { + ensureColumnsIsMutable(); + columns_.add(builderForValue.build()); + onChanged(); + } else { + columnsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addColumns( + int index, com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder builderForValue) { + if (columnsBuilder_ == null) { + ensureColumnsIsMutable(); + columns_.add(index, builderForValue.build()); + onChanged(); + } else { + columnsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllColumns( + java.lang.Iterable values) { + if (columnsBuilder_ == null) { + ensureColumnsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, columns_); + onChanged(); + } else { + columnsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearColumns() { + if (columnsBuilder_ == null) { + columns_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + columnsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeColumns(int index) { + if (columnsBuilder_ == null) { + ensureColumnsIsMutable(); + columns_.remove(index); + onChanged(); + } else { + columnsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder getColumnsBuilder(int index) { + return internalGetColumnsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder getColumnsOrBuilder( + int index) { + if (columnsBuilder_ == null) { + return columns_.get(index); + } else { + return columnsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getColumnsOrBuilderList() { + if (columnsBuilder_ != null) { + return columnsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(columns_); + } + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder addColumnsBuilder() { + return internalGetColumnsFieldBuilder() + .addBuilder(com.google.cloud.biglake.hive.v1beta.FieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder addColumnsBuilder(int index) { + return internalGetColumnsFieldBuilder() + .addBuilder(index, com.google.cloud.biglake.hive.v1beta.FieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Specifies the columns of the table.
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getColumnsBuilderList() { + return internalGetColumnsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.FieldSchema, + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder, + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder> + internalGetColumnsFieldBuilder() { + if (columnsBuilder_ == null) { + columnsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.FieldSchema, + com.google.cloud.biglake.hive.v1beta.FieldSchema.Builder, + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder>( + columns_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + columns_ = null; + } + return columnsBuilder_; + } + + private java.lang.Object locationUri_ = ""; + + /** + * + * + *
+     * Optional. The Cloud storage uri where the table is located.
+     * Defaults to `<database_location_uri>/<table_name>`. The maximum length is
+     * 4000 characters.
+     * 
+ * + * string location_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + public java.lang.String getLocationUri() { + java.lang.Object ref = locationUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The Cloud storage uri where the table is located.
+     * Defaults to `<database_location_uri>/<table_name>`. The maximum length is
+     * 4000 characters.
+     * 
+ * + * string location_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + public com.google.protobuf.ByteString getLocationUriBytes() { + java.lang.Object ref = locationUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The Cloud storage uri where the table is located.
+     * Defaults to `<database_location_uri>/<table_name>`. The maximum length is
+     * 4000 characters.
+     * 
+ * + * string location_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The locationUri to set. + * @return This builder for chaining. + */ + public Builder setLocationUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + locationUri_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The Cloud storage uri where the table is located.
+     * Defaults to `<database_location_uri>/<table_name>`. The maximum length is
+     * 4000 characters.
+     * 
+ * + * string location_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLocationUri() { + locationUri_ = getDefaultInstance().getLocationUri(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The Cloud storage uri where the table is located.
+     * Defaults to `<database_location_uri>/<table_name>`. The maximum length is
+     * 4000 characters.
+     * 
+ * + * string location_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for locationUri to set. + * @return This builder for chaining. + */ + public Builder setLocationUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + locationUri_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object inputFormat_ = ""; + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the input format. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string input_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The inputFormat. + */ + public java.lang.String getInputFormat() { + java.lang.Object ref = inputFormat_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + inputFormat_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the input format. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string input_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for inputFormat. + */ + public com.google.protobuf.ByteString getInputFormatBytes() { + java.lang.Object ref = inputFormat_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + inputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the input format. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string input_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The inputFormat to set. + * @return This builder for chaining. + */ + public Builder setInputFormat(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + inputFormat_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the input format. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string input_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearInputFormat() { + inputFormat_ = getDefaultInstance().getInputFormat(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the input format. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string input_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for inputFormat to set. + * @return This builder for chaining. + */ + public Builder setInputFormatBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + inputFormat_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object outputFormat_ = ""; + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the output format. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + outputFormat_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the output format. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + public com.google.protobuf.ByteString getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the output format. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The outputFormat to set. + * @return This builder for chaining. + */ + public Builder setOutputFormat(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + outputFormat_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the output format. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearOutputFormat() { + outputFormat_ = getDefaultInstance().getOutputFormat(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The fully qualified Java class name of the output format. The
+     * maximum length is 4000 characters.
+     * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for outputFormat to set. + * @return This builder for chaining. + */ + public Builder setOutputFormatBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + outputFormat_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private boolean compressed_; + + /** + * + * + *
+     * Optional. Whether the table is compressed.
+     * 
+ * + * optional bool compressed = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the compressed field is set. + */ + @java.lang.Override + public boolean hasCompressed() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Optional. Whether the table is compressed.
+     * 
+ * + * optional bool compressed = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The compressed. + */ + @java.lang.Override + public boolean getCompressed() { + return compressed_; + } + + /** + * + * + *
+     * Optional. Whether the table is compressed.
+     * 
+ * + * optional bool compressed = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The compressed to set. + * @return This builder for chaining. + */ + public Builder setCompressed(boolean value) { + + compressed_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Whether the table is compressed.
+     * 
+ * + * optional bool compressed = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearCompressed() { + bitField0_ = (bitField0_ & ~0x00000010); + compressed_ = false; + onChanged(); + return this; + } + + private int numBuckets_; + + /** + * + * + *
+     * Optional. The number of buckets in the table.
+     * 
+ * + * optional int32 num_buckets = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the numBuckets field is set. + */ + @java.lang.Override + public boolean hasNumBuckets() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * Optional. The number of buckets in the table.
+     * 
+ * + * optional int32 num_buckets = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The numBuckets. + */ + @java.lang.Override + public int getNumBuckets() { + return numBuckets_; + } + + /** + * + * + *
+     * Optional. The number of buckets in the table.
+     * 
+ * + * optional int32 num_buckets = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The numBuckets to set. + * @return This builder for chaining. + */ + public Builder setNumBuckets(int value) { + + numBuckets_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The number of buckets in the table.
+     * 
+ * + * optional int32 num_buckets = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearNumBuckets() { + bitField0_ = (bitField0_ & ~0x00000020); + numBuckets_ = 0; + onChanged(); + return this; + } + + private com.google.cloud.biglake.hive.v1beta.SerdeInfo serdeInfo_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.SerdeInfo, + com.google.cloud.biglake.hive.v1beta.SerdeInfo.Builder, + com.google.cloud.biglake.hive.v1beta.SerdeInfoOrBuilder> + serdeInfoBuilder_; + + /** + * + * + *
+     * Optional. Serialization and deserialization information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the serdeInfo field is set. + */ + public boolean hasSerdeInfo() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Optional. Serialization and deserialization information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeInfo. + */ + public com.google.cloud.biglake.hive.v1beta.SerdeInfo getSerdeInfo() { + if (serdeInfoBuilder_ == null) { + return serdeInfo_ == null + ? com.google.cloud.biglake.hive.v1beta.SerdeInfo.getDefaultInstance() + : serdeInfo_; + } else { + return serdeInfoBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Serialization and deserialization information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSerdeInfo(com.google.cloud.biglake.hive.v1beta.SerdeInfo value) { + if (serdeInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + serdeInfo_ = value; + } else { + serdeInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Serialization and deserialization information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSerdeInfo( + com.google.cloud.biglake.hive.v1beta.SerdeInfo.Builder builderForValue) { + if (serdeInfoBuilder_ == null) { + serdeInfo_ = builderForValue.build(); + } else { + serdeInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Serialization and deserialization information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeSerdeInfo(com.google.cloud.biglake.hive.v1beta.SerdeInfo value) { + if (serdeInfoBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && serdeInfo_ != null + && serdeInfo_ != com.google.cloud.biglake.hive.v1beta.SerdeInfo.getDefaultInstance()) { + getSerdeInfoBuilder().mergeFrom(value); + } else { + serdeInfo_ = value; + } + } else { + serdeInfoBuilder_.mergeFrom(value); + } + if (serdeInfo_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Serialization and deserialization information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSerdeInfo() { + bitField0_ = (bitField0_ & ~0x00000040); + serdeInfo_ = null; + if (serdeInfoBuilder_ != null) { + serdeInfoBuilder_.dispose(); + serdeInfoBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Serialization and deserialization information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.SerdeInfo.Builder getSerdeInfoBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetSerdeInfoFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Serialization and deserialization information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.SerdeInfoOrBuilder getSerdeInfoOrBuilder() { + if (serdeInfoBuilder_ != null) { + return serdeInfoBuilder_.getMessageOrBuilder(); + } else { + return serdeInfo_ == null + ? com.google.cloud.biglake.hive.v1beta.SerdeInfo.getDefaultInstance() + : serdeInfo_; + } + } + + /** + * + * + *
+     * Optional. Serialization and deserialization information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.SerdeInfo, + com.google.cloud.biglake.hive.v1beta.SerdeInfo.Builder, + com.google.cloud.biglake.hive.v1beta.SerdeInfoOrBuilder> + internalGetSerdeInfoFieldBuilder() { + if (serdeInfoBuilder_ == null) { + serdeInfoBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.SerdeInfo, + com.google.cloud.biglake.hive.v1beta.SerdeInfo.Builder, + com.google.cloud.biglake.hive.v1beta.SerdeInfoOrBuilder>( + getSerdeInfo(), getParentForChildren(), isClean()); + serdeInfo_ = null; + } + return serdeInfoBuilder_; + } + + private com.google.protobuf.LazyStringArrayList bucketCols_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureBucketColsIsMutable() { + if (!bucketCols_.isModifiable()) { + bucketCols_ = new com.google.protobuf.LazyStringArrayList(bucketCols_); + } + bitField0_ |= 0x00000080; + } + + /** + * + * + *
+     * Optional. Reducer grouping columns and clustering columns and bucketing
+     * columns
+     * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the bucketCols. + */ + public com.google.protobuf.ProtocolStringList getBucketColsList() { + bucketCols_.makeImmutable(); + return bucketCols_; + } + + /** + * + * + *
+     * Optional. Reducer grouping columns and clustering columns and bucketing
+     * columns
+     * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of bucketCols. + */ + public int getBucketColsCount() { + return bucketCols_.size(); + } + + /** + * + * + *
+     * Optional. Reducer grouping columns and clustering columns and bucketing
+     * columns
+     * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The bucketCols at the given index. + */ + public java.lang.String getBucketCols(int index) { + return bucketCols_.get(index); + } + + /** + * + * + *
+     * Optional. Reducer grouping columns and clustering columns and bucketing
+     * columns
+     * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the bucketCols at the given index. + */ + public com.google.protobuf.ByteString getBucketColsBytes(int index) { + return bucketCols_.getByteString(index); + } + + /** + * + * + *
+     * Optional. Reducer grouping columns and clustering columns and bucketing
+     * columns
+     * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The bucketCols to set. + * @return This builder for chaining. + */ + public Builder setBucketCols(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketColsIsMutable(); + bucketCols_.set(index, value); + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Reducer grouping columns and clustering columns and bucketing
+     * columns
+     * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bucketCols to add. + * @return This builder for chaining. + */ + public Builder addBucketCols(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketColsIsMutable(); + bucketCols_.add(value); + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Reducer grouping columns and clustering columns and bucketing
+     * columns
+     * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The bucketCols to add. + * @return This builder for chaining. + */ + public Builder addAllBucketCols(java.lang.Iterable values) { + ensureBucketColsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, bucketCols_); + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Reducer grouping columns and clustering columns and bucketing
+     * columns
+     * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearBucketCols() { + bucketCols_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + ; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Reducer grouping columns and clustering columns and bucketing
+     * columns
+     * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the bucketCols to add. + * @return This builder for chaining. + */ + public Builder addBucketColsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureBucketColsIsMutable(); + bucketCols_.add(value); + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + private java.util.List sortCols_ = + java.util.Collections.emptyList(); + + private void ensureSortColsIsMutable() { + if (!((bitField0_ & 0x00000100) != 0)) { + sortCols_ = + new java.util.ArrayList( + sortCols_); + bitField0_ |= 0x00000100; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.OrderOrBuilder> + sortColsBuilder_; + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getSortColsList() { + if (sortColsBuilder_ == null) { + return java.util.Collections.unmodifiableList(sortCols_); + } else { + return sortColsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getSortColsCount() { + if (sortColsBuilder_ == null) { + return sortCols_.size(); + } else { + return sortColsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order getSortCols(int index) { + if (sortColsBuilder_ == null) { + return sortCols_.get(index); + } else { + return sortColsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSortCols( + int index, com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order value) { + if (sortColsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSortColsIsMutable(); + sortCols_.set(index, value); + onChanged(); + } else { + sortColsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSortCols( + int index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.Builder builderForValue) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.set(index, builderForValue.build()); + onChanged(); + } else { + sortColsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addSortCols(com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order value) { + if (sortColsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSortColsIsMutable(); + sortCols_.add(value); + onChanged(); + } else { + sortColsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addSortCols( + int index, com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order value) { + if (sortColsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSortColsIsMutable(); + sortCols_.add(index, value); + onChanged(); + } else { + sortColsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addSortCols( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.Builder builderForValue) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.add(builderForValue.build()); + onChanged(); + } else { + sortColsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addSortCols( + int index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.Builder builderForValue) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.add(index, builderForValue.build()); + onChanged(); + } else { + sortColsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllSortCols( + java.lang.Iterable + values) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, sortCols_); + onChanged(); + } else { + sortColsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSortCols() { + if (sortColsBuilder_ == null) { + sortCols_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + } else { + sortColsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeSortCols(int index) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.remove(index); + onChanged(); + } else { + sortColsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.Builder getSortColsBuilder( + int index) { + return internalGetSortColsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.OrderOrBuilder + getSortColsOrBuilder(int index) { + if (sortColsBuilder_ == null) { + return sortCols_.get(index); + } else { + return sortColsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + ? extends com.google.cloud.biglake.hive.v1beta.StorageDescriptor.OrderOrBuilder> + getSortColsOrBuilderList() { + if (sortColsBuilder_ != null) { + return sortColsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(sortCols_); + } + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.Builder + addSortColsBuilder() { + return internalGetSortColsFieldBuilder() + .addBuilder( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.Builder addSortColsBuilder( + int index) { + return internalGetSortColsFieldBuilder() + .addBuilder( + index, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Sort order of the data in each bucket
+     * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getSortColsBuilderList() { + return internalGetSortColsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.OrderOrBuilder> + internalGetSortColsFieldBuilder() { + if (sortColsBuilder_ == null) { + sortColsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.OrderOrBuilder>( + sortCols_, ((bitField0_ & 0x00000100) != 0), getParentForChildren(), isClean()); + sortCols_ = null; + } + return sortColsBuilder_; + } + + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField + internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + private com.google.protobuf.MapField + internalGetMutableParameters() { + if (parameters_ == null) { + parameters_ = + com.google.protobuf.MapField.newMapField(ParametersDefaultEntryHolder.defaultEntry); + } + if (!parameters_.isMutable()) { + parameters_ = parameters_.copy(); + } + bitField0_ |= 0x00000200; + onChanged(); + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+     * Optional. Key-value pairs for the storage descriptor. The maximum size is
+     * 10Kib.
+     * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+     * Optional. Key-value pairs for the storage descriptor. The maximum size is
+     * 10Kib.
+     * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+     * Optional. Key-value pairs for the storage descriptor. The maximum size is
+     * 10Kib.
+     * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+     * Optional. Key-value pairs for the storage descriptor. The maximum size is
+     * 10Kib.
+     * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearParameters() { + bitField0_ = (bitField0_ & ~0x00000200); + internalGetMutableParameters().getMutableMap().clear(); + return this; + } + + /** + * + * + *
+     * Optional. Key-value pairs for the storage descriptor. The maximum size is
+     * 10Kib.
+     * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableParameters().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableParameters() { + bitField0_ |= 0x00000200; + return internalGetMutableParameters().getMutableMap(); + } + + /** + * + * + *
+     * Optional. Key-value pairs for the storage descriptor. The maximum size is
+     * 10Kib.
+     * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putParameters(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParameters().getMutableMap().put(key, value); + bitField0_ |= 0x00000200; + return this; + } + + /** + * + * + *
+     * Optional. Key-value pairs for the storage descriptor. The maximum size is
+     * 10Kib.
+     * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllParameters(java.util.Map values) { + internalGetMutableParameters().getMutableMap().putAll(values); + bitField0_ |= 0x00000200; + return this; + } + + private com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewedInfo_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfoOrBuilder> + skewedInfoBuilder_; + + /** + * + * + *
+     * Optional. Table data skew information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the skewedInfo field is set. + */ + public boolean hasSkewedInfo() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
+     * Optional. Table data skew information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The skewedInfo. + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo getSkewedInfo() { + if (skewedInfoBuilder_ == null) { + return skewedInfo_ == null + ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.getDefaultInstance() + : skewedInfo_; + } else { + return skewedInfoBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Table data skew information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSkewedInfo( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo value) { + if (skewedInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + skewedInfo_ = value; + } else { + skewedInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Table data skew information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSkewedInfo( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.Builder builderForValue) { + if (skewedInfoBuilder_ == null) { + skewedInfo_ = builderForValue.build(); + } else { + skewedInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Table data skew information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeSkewedInfo( + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo value) { + if (skewedInfoBuilder_ == null) { + if (((bitField0_ & 0x00000400) != 0) + && skewedInfo_ != null + && skewedInfo_ + != com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo + .getDefaultInstance()) { + getSkewedInfoBuilder().mergeFrom(value); + } else { + skewedInfo_ = value; + } + } else { + skewedInfoBuilder_.mergeFrom(value); + } + if (skewedInfo_ != null) { + bitField0_ |= 0x00000400; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Table data skew information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSkewedInfo() { + bitField0_ = (bitField0_ & ~0x00000400); + skewedInfo_ = null; + if (skewedInfoBuilder_ != null) { + skewedInfoBuilder_.dispose(); + skewedInfoBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Table data skew information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.Builder + getSkewedInfoBuilder() { + bitField0_ |= 0x00000400; + onChanged(); + return internalGetSkewedInfoFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Table data skew information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfoOrBuilder + getSkewedInfoOrBuilder() { + if (skewedInfoBuilder_ != null) { + return skewedInfoBuilder_.getMessageOrBuilder(); + } else { + return skewedInfo_ == null + ? com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.getDefaultInstance() + : skewedInfo_; + } + } + + /** + * + * + *
+     * Optional. Table data skew information.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfoOrBuilder> + internalGetSkewedInfoFieldBuilder() { + if (skewedInfoBuilder_ == null) { + skewedInfoBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo.Builder, + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfoOrBuilder>( + getSkewedInfo(), getParentForChildren(), isClean()); + skewedInfo_ = null; + } + return skewedInfoBuilder_; + } + + private boolean storedAsSubDirs_; + + /** + * + * + *
+     * Optional. Whether the table is stored as sub directories.
+     * 
+ * + * optional bool stored_as_sub_dirs = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the storedAsSubDirs field is set. + */ + @java.lang.Override + public boolean hasStoredAsSubDirs() { + return ((bitField0_ & 0x00000800) != 0); + } + + /** + * + * + *
+     * Optional. Whether the table is stored as sub directories.
+     * 
+ * + * optional bool stored_as_sub_dirs = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storedAsSubDirs. + */ + @java.lang.Override + public boolean getStoredAsSubDirs() { + return storedAsSubDirs_; + } + + /** + * + * + *
+     * Optional. Whether the table is stored as sub directories.
+     * 
+ * + * optional bool stored_as_sub_dirs = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The storedAsSubDirs to set. + * @return This builder for chaining. + */ + public Builder setStoredAsSubDirs(boolean value) { + + storedAsSubDirs_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Whether the table is stored as sub directories.
+     * 
+ * + * optional bool stored_as_sub_dirs = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearStoredAsSubDirs() { + bitField0_ = (bitField0_ & ~0x00000800); + storedAsSubDirs_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.StorageDescriptor) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.StorageDescriptor) + private static final com.google.cloud.biglake.hive.v1beta.StorageDescriptor DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.StorageDescriptor(); + } + + public static com.google.cloud.biglake.hive.v1beta.StorageDescriptor getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StorageDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.StorageDescriptor 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/StorageDescriptorOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/StorageDescriptorOrBuilder.java new file mode 100644 index 000000000000..73fe64d75ad0 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/StorageDescriptorOrBuilder.java @@ -0,0 +1,543 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface StorageDescriptorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.StorageDescriptor) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Specifies the columns of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getColumnsList(); + + /** + * + * + *
+   * Required. Specifies the columns of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.FieldSchema getColumns(int index); + + /** + * + * + *
+   * Required. Specifies the columns of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getColumnsCount(); + + /** + * + * + *
+   * Required. Specifies the columns of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getColumnsOrBuilderList(); + + /** + * + * + *
+   * Required. Specifies the columns of the table.
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.FieldSchema columns = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.FieldSchemaOrBuilder getColumnsOrBuilder(int index); + + /** + * + * + *
+   * Optional. The Cloud storage uri where the table is located.
+   * Defaults to `<database_location_uri>/<table_name>`. The maximum length is
+   * 4000 characters.
+   * 
+ * + * string location_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + java.lang.String getLocationUri(); + + /** + * + * + *
+   * Optional. The Cloud storage uri where the table is located.
+   * Defaults to `<database_location_uri>/<table_name>`. The maximum length is
+   * 4000 characters.
+   * 
+ * + * string location_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + com.google.protobuf.ByteString getLocationUriBytes(); + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the input format. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string input_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The inputFormat. + */ + java.lang.String getInputFormat(); + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the input format. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string input_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for inputFormat. + */ + com.google.protobuf.ByteString getInputFormatBytes(); + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the output format. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + java.lang.String getOutputFormat(); + + /** + * + * + *
+   * Optional. The fully qualified Java class name of the output format. The
+   * maximum length is 4000 characters.
+   * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + com.google.protobuf.ByteString getOutputFormatBytes(); + + /** + * + * + *
+   * Optional. Whether the table is compressed.
+   * 
+ * + * optional bool compressed = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the compressed field is set. + */ + boolean hasCompressed(); + + /** + * + * + *
+   * Optional. Whether the table is compressed.
+   * 
+ * + * optional bool compressed = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The compressed. + */ + boolean getCompressed(); + + /** + * + * + *
+   * Optional. The number of buckets in the table.
+   * 
+ * + * optional int32 num_buckets = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the numBuckets field is set. + */ + boolean hasNumBuckets(); + + /** + * + * + *
+   * Optional. The number of buckets in the table.
+   * 
+ * + * optional int32 num_buckets = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The numBuckets. + */ + int getNumBuckets(); + + /** + * + * + *
+   * Optional. Serialization and deserialization information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the serdeInfo field is set. + */ + boolean hasSerdeInfo(); + + /** + * + * + *
+   * Optional. Serialization and deserialization information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeInfo. + */ + com.google.cloud.biglake.hive.v1beta.SerdeInfo getSerdeInfo(); + + /** + * + * + *
+   * Optional. Serialization and deserialization information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.SerdeInfo serde_info = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.biglake.hive.v1beta.SerdeInfoOrBuilder getSerdeInfoOrBuilder(); + + /** + * + * + *
+   * Optional. Reducer grouping columns and clustering columns and bucketing
+   * columns
+   * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the bucketCols. + */ + java.util.List getBucketColsList(); + + /** + * + * + *
+   * Optional. Reducer grouping columns and clustering columns and bucketing
+   * columns
+   * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of bucketCols. + */ + int getBucketColsCount(); + + /** + * + * + *
+   * Optional. Reducer grouping columns and clustering columns and bucketing
+   * columns
+   * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The bucketCols at the given index. + */ + java.lang.String getBucketCols(int index); + + /** + * + * + *
+   * Optional. Reducer grouping columns and clustering columns and bucketing
+   * columns
+   * 
+ * + * repeated string bucket_cols = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the bucketCols at the given index. + */ + com.google.protobuf.ByteString getBucketColsBytes(int index); + + /** + * + * + *
+   * Optional. Sort order of the data in each bucket
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getSortColsList(); + + /** + * + * + *
+   * Optional. Sort order of the data in each bucket
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.Order getSortCols(int index); + + /** + * + * + *
+   * Optional. Sort order of the data in each bucket
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getSortColsCount(); + + /** + * + * + *
+   * Optional. Sort order of the data in each bucket
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getSortColsOrBuilderList(); + + /** + * + * + *
+   * Optional. Sort order of the data in each bucket
+   * 
+ * + * + * repeated .google.cloud.biglake.hive.v1beta.StorageDescriptor.Order sort_cols = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.OrderOrBuilder getSortColsOrBuilder( + int index); + + /** + * + * + *
+   * Optional. Key-value pairs for the storage descriptor. The maximum size is
+   * 10Kib.
+   * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getParametersCount(); + + /** + * + * + *
+   * Optional. Key-value pairs for the storage descriptor. The maximum size is
+   * 10Kib.
+   * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsParameters(java.lang.String key); + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParameters(); + + /** + * + * + *
+   * Optional. Key-value pairs for the storage descriptor. The maximum size is
+   * 10Kib.
+   * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getParametersMap(); + + /** + * + * + *
+   * Optional. Key-value pairs for the storage descriptor. The maximum size is
+   * 10Kib.
+   * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
+   * Optional. Key-value pairs for the storage descriptor. The maximum size is
+   * 10Kib.
+   * 
+ * + * map<string, string> parameters = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getParametersOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. Table data skew information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the skewedInfo field is set. + */ + boolean hasSkewedInfo(); + + /** + * + * + *
+   * Optional. Table data skew information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The skewedInfo. + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo getSkewedInfo(); + + /** + * + * + *
+   * Optional. Table data skew information.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfo skewed_info = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.biglake.hive.v1beta.StorageDescriptor.SkewedInfoOrBuilder + getSkewedInfoOrBuilder(); + + /** + * + * + *
+   * Optional. Whether the table is stored as sub directories.
+   * 
+ * + * optional bool stored_as_sub_dirs = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the storedAsSubDirs field is set. + */ + boolean hasStoredAsSubDirs(); + + /** + * + * + *
+   * Optional. Whether the table is stored as sub directories.
+   * 
+ * + * optional bool stored_as_sub_dirs = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storedAsSubDirs. + */ + boolean getStoredAsSubDirs(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/TableName.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/TableName.java new file mode 100644 index 000000000000..d8f0f7c47a72 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/TableName.java @@ -0,0 +1,257 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class TableName implements ResourceName { + private static final PathTemplate PROJECT_CATALOG_DATABASE_TABLE = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/catalogs/{catalog}/databases/{database}/tables/{table}"); + private volatile Map fieldValuesMap; + private final String project; + private final String catalog; + private final String database; + private final String table; + + @Deprecated + protected TableName() { + project = null; + catalog = null; + database = null; + table = null; + } + + private TableName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + catalog = Preconditions.checkNotNull(builder.getCatalog()); + database = Preconditions.checkNotNull(builder.getDatabase()); + table = Preconditions.checkNotNull(builder.getTable()); + } + + public String getProject() { + return project; + } + + public String getCatalog() { + return catalog; + } + + public String getDatabase() { + return database; + } + + public String getTable() { + return table; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static TableName of(String project, String catalog, String database, String table) { + return newBuilder() + .setProject(project) + .setCatalog(catalog) + .setDatabase(database) + .setTable(table) + .build(); + } + + public static String format(String project, String catalog, String database, String table) { + return newBuilder() + .setProject(project) + .setCatalog(catalog) + .setDatabase(database) + .setTable(table) + .build() + .toString(); + } + + public static TableName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_CATALOG_DATABASE_TABLE.validatedMatch( + formattedString, "TableName.parse: formattedString not in valid format"); + return of( + matchMap.get("project"), + matchMap.get("catalog"), + 
matchMap.get("database"), + matchMap.get("table")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (TableName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_CATALOG_DATABASE_TABLE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (catalog != null) { + fieldMapBuilder.put("catalog", catalog); + } + if (database != null) { + fieldMapBuilder.put("database", database); + } + if (table != null) { + fieldMapBuilder.put("table", table); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_CATALOG_DATABASE_TABLE.instantiate( + "project", project, "catalog", catalog, "database", database, "table", table); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + TableName that = ((TableName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.catalog, that.catalog) + && Objects.equals(this.database, that.database) + && Objects.equals(this.table, that.table); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= 
Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(catalog); + h *= 1000003; + h ^= Objects.hashCode(database); + h *= 1000003; + h ^= Objects.hashCode(table); + return h; + } + + /** Builder for projects/{project}/catalogs/{catalog}/databases/{database}/tables/{table}. */ + public static class Builder { + private String project; + private String catalog; + private String database; + private String table; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getCatalog() { + return catalog; + } + + public String getDatabase() { + return database; + } + + public String getTable() { + return table; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setCatalog(String catalog) { + this.catalog = catalog; + return this; + } + + public Builder setDatabase(String database) { + this.database = database; + return this; + } + + public Builder setTable(String table) { + this.table = table; + return this; + } + + private Builder(TableName tableName) { + this.project = tableName.project; + this.catalog = tableName.catalog; + this.database = tableName.database; + this.table = tableName.table; + } + + public TableName build() { + return new TableName(this); + } + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveCatalogRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveCatalogRequest.java new file mode 100644 index 000000000000..48bdaae9eb8b --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveCatalogRequest.java @@ -0,0 +1,1103 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the UpdateHiveCatalog method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest} + */ +@com.google.protobuf.Generated +public final class UpdateHiveCatalogRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest) + UpdateHiveCatalogRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateHiveCatalogRequest"); + } + + // Use UpdateHiveCatalogRequest.newBuilder() to construct. + private UpdateHiveCatalogRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateHiveCatalogRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveCatalogRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveCatalogRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest.class, + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest.Builder.class); + } + + private int bitField0_; + public static final int HIVE_CATALOG_FIELD_NUMBER = 1; + private com.google.cloud.biglake.hive.v1beta.HiveCatalog hiveCatalog_; + + /** + * + * + *
+   * Required. The hive catalog to update.
+   * The name under the catalog is used to identify the catalog.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveCatalog field is set. + */ + @java.lang.Override + public boolean hasHiveCatalog() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The hive catalog to update.
+   * The name under the catalog is used to identify the catalog.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveCatalog. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalog getHiveCatalog() { + return hiveCatalog_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance() + : hiveCatalog_; + } + + /** + * + * + *
+   * Required. The hive catalog to update.
+   * The name under the catalog is used to identify the catalog.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder getHiveCatalogOrBuilder() { + return hiveCatalog_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance() + : hiveCatalog_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Optional. The list of fields to update.
+   *
+   * For the `FieldMask` definition, see
+   * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+   * If not set, defaults to all of the fields that are allowed to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   *
+   * For the `FieldMask` definition, see
+   * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+   * If not set, defaults to all of the fields that are allowed to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   *
+   * For the `FieldMask` definition, see
+   * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+   * If not set, defaults to all of the fields that are allowed to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getHiveCatalog()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getHiveCatalog()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest other = + (com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest) obj; + + if (hasHiveCatalog() != other.hasHiveCatalog()) return false; + if (hasHiveCatalog()) { + if (!getHiveCatalog().equals(other.getHiveCatalog())) return false; + } + if (hasUpdateMask() != 
other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasHiveCatalog()) { + hash = (37 * hash) + HIVE_CATALOG_FIELD_NUMBER; + hash = (53 * hash) + getHiveCatalog().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the UpdateHiveCatalog method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest) + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveCatalogRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveCatalogRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest.class, + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetHiveCatalogFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + hiveCatalog_ = null; + if (hiveCatalogBuilder_ != null) { + hiveCatalogBuilder_.dispose(); + hiveCatalogBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveCatalogRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest build() { + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest result = + new com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.hiveCatalog_ = + hiveCatalogBuilder_ == null ? hiveCatalog_ : hiveCatalogBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest other) { + if (other + == com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest.getDefaultInstance()) + return this; + if (other.hasHiveCatalog()) { + mergeHiveCatalog(other.getHiveCatalog()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetHiveCatalogFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.biglake.hive.v1beta.HiveCatalog hiveCatalog_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder> + hiveCatalogBuilder_; + + /** + * + * + *
+     * Required. The hive catalog to update.
+     * The name under the catalog is used to identify the catalog.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveCatalog field is set. + */ + public boolean hasHiveCatalog() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The hive catalog to update.
+     * The name under the catalog is used to identify the catalog.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveCatalog. + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog getHiveCatalog() { + if (hiveCatalogBuilder_ == null) { + return hiveCatalog_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance() + : hiveCatalog_; + } else { + return hiveCatalogBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The hive catalog to update.
+     * The name under the catalog is used to identify the catalog.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveCatalog(com.google.cloud.biglake.hive.v1beta.HiveCatalog value) { + if (hiveCatalogBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hiveCatalog_ = value; + } else { + hiveCatalogBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The hive catalog to update.
+     * The name under the catalog is used to identify the catalog.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveCatalog( + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder builderForValue) { + if (hiveCatalogBuilder_ == null) { + hiveCatalog_ = builderForValue.build(); + } else { + hiveCatalogBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The hive catalog to update.
+     * The name under the catalog is used to identify the catalog.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeHiveCatalog(com.google.cloud.biglake.hive.v1beta.HiveCatalog value) { + if (hiveCatalogBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && hiveCatalog_ != null + && hiveCatalog_ + != com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance()) { + getHiveCatalogBuilder().mergeFrom(value); + } else { + hiveCatalog_ = value; + } + } else { + hiveCatalogBuilder_.mergeFrom(value); + } + if (hiveCatalog_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The hive catalog to update.
+     * The name under the catalog is used to identify the catalog.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearHiveCatalog() { + bitField0_ = (bitField0_ & ~0x00000001); + hiveCatalog_ = null; + if (hiveCatalogBuilder_ != null) { + hiveCatalogBuilder_.dispose(); + hiveCatalogBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The hive catalog to update.
+     * The name under the catalog is used to identify the catalog.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder getHiveCatalogBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetHiveCatalogFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The hive catalog to update.
+     * The name under the catalog is used to identify the catalog.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder getHiveCatalogOrBuilder() { + if (hiveCatalogBuilder_ != null) { + return hiveCatalogBuilder_.getMessageOrBuilder(); + } else { + return hiveCatalog_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveCatalog.getDefaultInstance() + : hiveCatalog_; + } + } + + /** + * + * + *
+     * Required. The hive catalog to update.
+     * The name under the catalog is used to identify the catalog.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder> + internalGetHiveCatalogFieldBuilder() { + if (hiveCatalogBuilder_ == null) { + hiveCatalogBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveCatalog, + com.google.cloud.biglake.hive.v1beta.HiveCatalog.Builder, + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder>( + getHiveCatalog(), getParentForChildren(), isClean()); + hiveCatalog_ = null; + } + return hiveCatalogBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Optional. The list of fields to update.
+     *
+     * For the `FieldMask` definition, see
+     * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+     * If not set, defaults to all of the fields that are allowed to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     *
+     * For the `FieldMask` definition, see
+     * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+     * If not set, defaults to all of the fields that are allowed to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     *
+     * For the `FieldMask` definition, see
+     * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+     * If not set, defaults to all of the fields that are allowed to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     *
+     * For the `FieldMask` definition, see
+     * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+     * If not set, defaults to all of the fields that are allowed to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     *
+     * For the `FieldMask` definition, see
+     * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+     * If not set, defaults to all of the fields that are allowed to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     *
+     * For the `FieldMask` definition, see
+     * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+     * If not set, defaults to all of the fields that are allowed to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     *
+     * For the `FieldMask` definition, see
+     * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+     * If not set, defaults to all of the fields that are allowed to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     *
+     * For the `FieldMask` definition, see
+     * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+     * If not set, defaults to all of the fields that are allowed to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     *
+     * For the `FieldMask` definition, see
+     * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+     * If not set, defaults to all of the fields that are allowed to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest) + private static final com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateHiveCatalogRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveCatalogRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveCatalogRequestOrBuilder.java new file mode 100644 index 000000000000..0dbeaebeebaa --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveCatalogRequestOrBuilder.java @@ -0,0 +1,132 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface UpdateHiveCatalogRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The hive catalog to update.
+   * The name under the catalog is used to identify the catalog.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveCatalog field is set. + */ + boolean hasHiveCatalog(); + + /** + * + * + *
+   * Required. The hive catalog to update.
+   * The name under the catalog is used to identify the catalog.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveCatalog. + */ + com.google.cloud.biglake.hive.v1beta.HiveCatalog getHiveCatalog(); + + /** + * + * + *
+   * Required. The hive catalog to update.
+   * The name under the catalog is used to identify the catalog.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveCatalog hive_catalog = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveCatalogOrBuilder getHiveCatalogOrBuilder(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   *
+   * For the `FieldMask` definition, see
+   * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+   * If not set, defaults to all of the fields that are allowed to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   *
+   * For the `FieldMask` definition, see
+   * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+   * If not set, defaults to all of the fields that are allowed to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   *
+   * For the `FieldMask` definition, see
+   * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
+   * If not set, defaults to all of the fields that are allowed to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveDatabaseRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveDatabaseRequest.java new file mode 100644 index 000000000000..2a91a4ccc320 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveDatabaseRequest.java @@ -0,0 +1,1069 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the UpdateHiveDatabase method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest} + */ +@com.google.protobuf.Generated +public final class UpdateHiveDatabaseRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest) + UpdateHiveDatabaseRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateHiveDatabaseRequest"); + } + + // Use UpdateHiveDatabaseRequest.newBuilder() to construct. + private UpdateHiveDatabaseRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateHiveDatabaseRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest.class, + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest.Builder.class); + } + + private int bitField0_; + public static final int HIVE_DATABASE_FIELD_NUMBER = 1; + private com.google.cloud.biglake.hive.v1beta.HiveDatabase hiveDatabase_; + + /** + * + * + *
+   * Required. The database to update.
+   *
+   * The database's `name` field is used to identify the database to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveDatabase field is set. + */ + @java.lang.Override + public boolean hasHiveDatabase() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The database to update.
+   *
+   * The database's `name` field is used to identify the database to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveDatabase. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveDatabase getHiveDatabase() { + return hiveDatabase_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance() + : hiveDatabase_; + } + + /** + * + * + *
+   * Required. The database to update.
+   *
+   * The database's `name` field is used to identify the database to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder getHiveDatabaseOrBuilder() { + return hiveDatabase_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance() + : hiveDatabase_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getHiveDatabase()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getHiveDatabase()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest other = + (com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest) obj; + + if (hasHiveDatabase() != other.hasHiveDatabase()) return false; + if (hasHiveDatabase()) { + if (!getHiveDatabase().equals(other.getHiveDatabase())) return false; + } + if (hasUpdateMask() 
!= other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasHiveDatabase()) { + hash = (37 * hash) + HIVE_DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getHiveDatabase().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { 
+ return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the UpdateHiveDatabase method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest) + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest.class, + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetHiveDatabaseFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + hiveDatabase_ = null; + if (hiveDatabaseBuilder_ != null) { + hiveDatabaseBuilder_.dispose(); + hiveDatabaseBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override 
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveDatabaseRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest + getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest build() { + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest result = + new com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.hiveDatabase_ = + hiveDatabaseBuilder_ == null ? hiveDatabase_ : hiveDatabaseBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest other) { + if (other + == com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest.getDefaultInstance()) + return this; + if (other.hasHiveDatabase()) { + mergeHiveDatabase(other.getHiveDatabase()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetHiveDatabaseFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.biglake.hive.v1beta.HiveDatabase hiveDatabase_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveDatabase, + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder, + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder> + hiveDatabaseBuilder_; + + /** + * + * + *
+     * Required. The database to update.
+     *
+     * The database's `name` field is used to identify the database to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveDatabase field is set. + */ + public boolean hasHiveDatabase() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The database to update.
+     *
+     * The database's `name` field is used to identify the database to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveDatabase. + */ + public com.google.cloud.biglake.hive.v1beta.HiveDatabase getHiveDatabase() { + if (hiveDatabaseBuilder_ == null) { + return hiveDatabase_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance() + : hiveDatabase_; + } else { + return hiveDatabaseBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The database to update.
+     *
+     * The database's `name` field is used to identify the database to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveDatabase(com.google.cloud.biglake.hive.v1beta.HiveDatabase value) { + if (hiveDatabaseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hiveDatabase_ = value; + } else { + hiveDatabaseBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The database to update.
+     *
+     * The database's `name` field is used to identify the database to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveDatabase( + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder builderForValue) { + if (hiveDatabaseBuilder_ == null) { + hiveDatabase_ = builderForValue.build(); + } else { + hiveDatabaseBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The database to update.
+     *
+     * The database's `name` field is used to identify the database to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeHiveDatabase(com.google.cloud.biglake.hive.v1beta.HiveDatabase value) { + if (hiveDatabaseBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && hiveDatabase_ != null + && hiveDatabase_ + != com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance()) { + getHiveDatabaseBuilder().mergeFrom(value); + } else { + hiveDatabase_ = value; + } + } else { + hiveDatabaseBuilder_.mergeFrom(value); + } + if (hiveDatabase_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The database to update.
+     *
+     * The database's `name` field is used to identify the database to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearHiveDatabase() { + bitField0_ = (bitField0_ & ~0x00000001); + hiveDatabase_ = null; + if (hiveDatabaseBuilder_ != null) { + hiveDatabaseBuilder_.dispose(); + hiveDatabaseBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The database to update.
+     *
+     * The database's `name` field is used to identify the database to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder getHiveDatabaseBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetHiveDatabaseFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The database to update.
+     *
+     * The database's `name` field is used to identify the database to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder getHiveDatabaseOrBuilder() { + if (hiveDatabaseBuilder_ != null) { + return hiveDatabaseBuilder_.getMessageOrBuilder(); + } else { + return hiveDatabase_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveDatabase.getDefaultInstance() + : hiveDatabase_; + } + } + + /** + * + * + *
+     * Required. The database to update.
+     *
+     * The database's `name` field is used to identify the database to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveDatabase, + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder, + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder> + internalGetHiveDatabaseFieldBuilder() { + if (hiveDatabaseBuilder_ == null) { + hiveDatabaseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveDatabase, + com.google.cloud.biglake.hive.v1beta.HiveDatabase.Builder, + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder>( + getHiveDatabase(), getParentForChildren(), isClean()); + hiveDatabase_ = null; + } + return hiveDatabaseBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest) + private static final com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateHiveDatabaseRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveDatabaseRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveDatabaseRequestOrBuilder.java new file mode 100644 index 000000000000..e4bb31dfa621 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveDatabaseRequestOrBuilder.java @@ -0,0 +1,123 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface UpdateHiveDatabaseRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The database to update.
+   *
+   * The database's `name` field is used to identify the database to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveDatabase field is set. + */ + boolean hasHiveDatabase(); + + /** + * + * + *
+   * Required. The database to update.
+   *
+   * The database's `name` field is used to identify the database to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveDatabase. + */ + com.google.cloud.biglake.hive.v1beta.HiveDatabase getHiveDatabase(); + + /** + * + * + *
+   * Required. The database to update.
+   *
+   * The database's `name` field is used to identify the database to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveDatabase hive_database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveDatabaseOrBuilder getHiveDatabaseOrBuilder(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveTableRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveTableRequest.java new file mode 100644 index 000000000000..49dd733d4cdc --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveTableRequest.java @@ -0,0 +1,1061 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for the UpdateHiveTable method.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest} + */ +@com.google.protobuf.Generated +public final class UpdateHiveTableRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest) + UpdateHiveTableRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateHiveTableRequest"); + } + + // Use UpdateHiveTableRequest.newBuilder() to construct. + private UpdateHiveTableRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateHiveTableRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveTableRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest.class, + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest.Builder.class); + } + + private int bitField0_; + public static final int HIVE_TABLE_FIELD_NUMBER = 1; + private com.google.cloud.biglake.hive.v1beta.HiveTable hiveTable_; + + /** + * + * + *
+   * Required. The table to update.
+   *
+   * The table's `name` field is used to identify the table to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveTable field is set. + */ + @java.lang.Override + public boolean hasHiveTable() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The table to update.
+   *
+   * The table's `name` field is used to identify the table to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveTable. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveTable getHiveTable() { + return hiveTable_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance() + : hiveTable_; + } + + /** + * + * + *
+   * Required. The table to update.
+   *
+   * The table's `name` field is used to identify the table to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder getHiveTableOrBuilder() { + return hiveTable_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance() + : hiveTable_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getHiveTable()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getHiveTable()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest other = + (com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest) obj; + + if (hasHiveTable() != other.hasHiveTable()) return false; + if (hasHiveTable()) { + if (!getHiveTable().equals(other.getHiveTable())) return false; + } + if (hasUpdateMask() != other.hasUpdateMask()) 
return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasHiveTable()) { + hash = (37 * hash) + HIVE_TABLE_FIELD_NUMBER; + hash = (53 * hash) + getHiveTable().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public 
static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); 
+ } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for the UpdateHiveTable method.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest) + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveTableRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest.class, + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetHiveTableFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + hiveTable_ = null; + if (hiveTableBuilder_ != null) { + hiveTableBuilder_.dispose(); + hiveTableBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdateHiveTableRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest build() { + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest result = + new com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.hiveTable_ = hiveTableBuilder_ == null ? hiveTable_ : hiveTableBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest other) { + if (other == com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest.getDefaultInstance()) + return this; + if (other.hasHiveTable()) { + mergeHiveTable(other.getHiveTable()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetHiveTableFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { 
+ onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.biglake.hive.v1beta.HiveTable hiveTable_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveTable, + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder, + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder> + hiveTableBuilder_; + + /** + * + * + *
+     * Required. The table to update.
+     *
+     * The table's `name` field is used to identify the table to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveTable field is set. + */ + public boolean hasHiveTable() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The table to update.
+     *
+     * The table's `name` field is used to identify the table to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveTable. + */ + public com.google.cloud.biglake.hive.v1beta.HiveTable getHiveTable() { + if (hiveTableBuilder_ == null) { + return hiveTable_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance() + : hiveTable_; + } else { + return hiveTableBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The table to update.
+     *
+     * The table's `name` field is used to identify the table to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveTable(com.google.cloud.biglake.hive.v1beta.HiveTable value) { + if (hiveTableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hiveTable_ = value; + } else { + hiveTableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The table to update.
+     *
+     * The table's `name` field is used to identify the table to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setHiveTable( + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder builderForValue) { + if (hiveTableBuilder_ == null) { + hiveTable_ = builderForValue.build(); + } else { + hiveTableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The table to update.
+     *
+     * The table's `name` field is used to identify the table to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeHiveTable(com.google.cloud.biglake.hive.v1beta.HiveTable value) { + if (hiveTableBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && hiveTable_ != null + && hiveTable_ != com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance()) { + getHiveTableBuilder().mergeFrom(value); + } else { + hiveTable_ = value; + } + } else { + hiveTableBuilder_.mergeFrom(value); + } + if (hiveTable_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The table to update.
+     *
+     * The table's `name` field is used to identify the table to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearHiveTable() { + bitField0_ = (bitField0_ & ~0x00000001); + hiveTable_ = null; + if (hiveTableBuilder_ != null) { + hiveTableBuilder_.dispose(); + hiveTableBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The table to update.
+     *
+     * The table's `name` field is used to identify the table to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveTable.Builder getHiveTableBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetHiveTableFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The table to update.
+     *
+     * The table's `name` field is used to identify the table to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder getHiveTableOrBuilder() { + if (hiveTableBuilder_ != null) { + return hiveTableBuilder_.getMessageOrBuilder(); + } else { + return hiveTable_ == null + ? com.google.cloud.biglake.hive.v1beta.HiveTable.getDefaultInstance() + : hiveTable_; + } + } + + /** + * + * + *
+     * Required. The table to update.
+     *
+     * The table's `name` field is used to identify the table to update.
+     * Format:
+     * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveTable, + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder, + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder> + internalGetHiveTableFieldBuilder() { + if (hiveTableBuilder_ == null) { + hiveTableBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.HiveTable, + com.google.cloud.biglake.hive.v1beta.HiveTable.Builder, + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder>( + getHiveTable(), getParentForChildren(), isClean()); + hiveTable_ = null; + } + return hiveTableBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest) + private static final com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateHiveTableRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveTableRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveTableRequestOrBuilder.java new file mode 100644 index 000000000000..0ea914838db7 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdateHiveTableRequestOrBuilder.java @@ -0,0 +1,123 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface UpdateHiveTableRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The table to update.
+   *
+   * The table's `name` field is used to identify the table to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the hiveTable field is set. + */ + boolean hasHiveTable(); + + /** + * + * + *
+   * Required. The table to update.
+   *
+   * The table's `name` field is used to identify the table to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The hiveTable. + */ + com.google.cloud.biglake.hive.v1beta.HiveTable getHiveTable(); + + /** + * + * + *
+   * Required. The table to update.
+   *
+   * The table's `name` field is used to identify the table to update.
+   * Format:
+   * projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.HiveTable hive_table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.HiveTableOrBuilder getHiveTableOrBuilder(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdatePartitionRequest.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdatePartitionRequest.java new file mode 100644 index 000000000000..34cd06392002 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdatePartitionRequest.java @@ -0,0 +1,1013 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +/** + * + * + *
+ * Request message for UpdatePartition.
+ * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.UpdatePartitionRequest} + */ +@com.google.protobuf.Generated +public final class UpdatePartitionRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.cloud.biglake.hive.v1beta.UpdatePartitionRequest) + UpdatePartitionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdatePartitionRequest"); + } + + // Use UpdatePartitionRequest.newBuilder() to construct. + private UpdatePartitionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdatePartitionRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdatePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdatePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.class, + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.Builder.class); + } + + private int bitField0_; + public static final int PARTITION_FIELD_NUMBER = 1; + private com.google.cloud.biglake.hive.v1beta.Partition partition_; + + /** + * + * + *
+   * Required. The partition to be updated.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the partition field is set. + */ + @java.lang.Override + public boolean hasPartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The partition to be updated.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The partition. + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.Partition getPartition() { + return partition_ == null + ? com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance() + : partition_; + } + + /** + * + * + *
+   * Required. The partition to be updated.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionOrBuilder() { + return partition_ == null + ? com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance() + : partition_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getPartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getPartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest)) { + return super.equals(obj); + } + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest other = + (com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest) obj; + + if (hasPartition() != other.hasPartition()) return false; + if (hasPartition()) { + if (!getPartition().equals(other.getPartition())) return false; + } + if (hasUpdateMask() != other.hasUpdateMask()) 
return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasPartition()) { + hash = (37 * hash) + PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getPartition().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public 
static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); 
+ } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for UpdatePartition.
+   * 
+ * + * Protobuf type {@code google.cloud.biglake.hive.v1beta.UpdatePartitionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.biglake.hive.v1beta.UpdatePartitionRequest) + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdatePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdatePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.class, + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.Builder.class); + } + + // Construct using com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetPartitionFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + partition_ = null; + if (partitionBuilder_ != null) { + partitionBuilder_.dispose(); + partitionBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.biglake.hive.v1beta.HiveMetastoreProto + .internal_static_google_cloud_biglake_hive_v1beta_UpdatePartitionRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest getDefaultInstanceForType() { + return com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest build() { + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest buildPartial() { + com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest result = + new com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.partition_ = partitionBuilder_ == null ? partition_ : partitionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest) { + return mergeFrom((com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest other) { + if (other == com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest.getDefaultInstance()) + return this; + if (other.hasPartition()) { + mergePartition(other.getPartition()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetPartitionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { 
+ onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.biglake.hive.v1beta.Partition partition_; + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder> + partitionBuilder_; + + /** + * + * + *
+     * Required. The partition to be updated.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the partition field is set. + */ + public boolean hasPartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The partition to be updated.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The partition. + */ + public com.google.cloud.biglake.hive.v1beta.Partition getPartition() { + if (partitionBuilder_ == null) { + return partition_ == null + ? com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance() + : partition_; + } else { + return partitionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The partition to be updated.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartition(com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + partition_ = value; + } else { + partitionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The partition to be updated.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartition( + com.google.cloud.biglake.hive.v1beta.Partition.Builder builderForValue) { + if (partitionBuilder_ == null) { + partition_ = builderForValue.build(); + } else { + partitionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The partition to be updated.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergePartition(com.google.cloud.biglake.hive.v1beta.Partition value) { + if (partitionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && partition_ != null + && partition_ != com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance()) { + getPartitionBuilder().mergeFrom(value); + } else { + partition_ = value; + } + } else { + partitionBuilder_.mergeFrom(value); + } + if (partition_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The partition to be updated.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearPartition() { + bitField0_ = (bitField0_ & ~0x00000001); + partition_ = null; + if (partitionBuilder_ != null) { + partitionBuilder_.dispose(); + partitionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The partition to be updated.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.Partition.Builder getPartitionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetPartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The partition to be updated.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionOrBuilder() { + if (partitionBuilder_ != null) { + return partitionBuilder_.getMessageOrBuilder(); + } else { + return partition_ == null + ? com.google.cloud.biglake.hive.v1beta.Partition.getDefaultInstance() + : partition_; + } + } + + /** + * + * + *
+     * Required. The partition to be updated.
+     * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder> + internalGetPartitionFieldBuilder() { + if (partitionBuilder_ == null) { + partitionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.cloud.biglake.hive.v1beta.Partition, + com.google.cloud.biglake.hive.v1beta.Partition.Builder, + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder>( + getPartition(), getParentForChildren(), isClean()); + partition_ = null; + } + return partitionBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.cloud.biglake.hive.v1beta.UpdatePartitionRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.biglake.hive.v1beta.UpdatePartitionRequest) + private static final com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest(); + } + + public static com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdatePartitionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdatePartitionRequestOrBuilder.java b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdatePartitionRequestOrBuilder.java new file mode 100644 index 000000000000..fcf57b7672d3 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/java/com/google/cloud/biglake/hive/v1beta/UpdatePartitionRequestOrBuilder.java @@ -0,0 +1,111 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/cloud/biglake/hive/v1beta/hive_metastore.proto +// Protobuf Java Version: 4.33.2 + +package com.google.cloud.biglake.hive.v1beta; + +@com.google.protobuf.Generated +public interface UpdatePartitionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.biglake.hive.v1beta.UpdatePartitionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The partition to be updated.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the partition field is set. + */ + boolean hasPartition(); + + /** + * + * + *
+   * Required. The partition to be updated.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The partition. + */ + com.google.cloud.biglake.hive.v1beta.Partition getPartition(); + + /** + * + * + *
+   * Required. The partition to be updated.
+   * 
+ * + * + * .google.cloud.biglake.hive.v1beta.Partition partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.biglake.hive.v1beta.PartitionOrBuilder getPartitionOrBuilder(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/java-biglake/proto-google-cloud-biglake-v1beta/src/main/proto/google/cloud/biglake/hive/v1beta/hive_metastore.proto b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/proto/google/cloud/biglake/hive/v1beta/hive_metastore.proto new file mode 100644 index 000000000000..3cdf2daf9834 --- /dev/null +++ b/java-biglake/proto-google-cloud-biglake-v1beta/src/main/proto/google/cloud/biglake/hive/v1beta/hive_metastore.proto @@ -0,0 +1,931 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.biglake.hive.v1beta; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "cloud.google.com/go/biglake/hive/apiv1beta/hivepb;hivepb"; +option java_multiple_files = true; +option java_outer_classname = "HiveMetastoreProto"; +option java_package = "com.google.cloud.biglake.hive.v1beta"; + +// Hive Metastore Service is a biglake service that allows users to manage +// their external Hive catalogs. Full API compatibility with OSS Hive Metastore +// APIs is not supported. 
The methods match the Hive Metastore API spec mostly +// except for a few exceptions. +// These include listing resources with pattern, +// environment context which are combined in a single List API, return of +// ListResponse object instead of a list of resources, transactions, locks, etc. +// +// The BigLake Hive Metastore API defines the following resources: +// +// * A collection of Google Cloud projects: `/projects/*` +// * Each project has a collection of catalogs: `/catalogs/*` +// * Each catalog has a collection of databases: `/databases/*` +// * Each database has a collection of tables: `/tables/*` +service HiveMetastoreService { + option (google.api.default_host) = "biglake.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a new hive catalog. + rpc CreateHiveCatalog(CreateHiveCatalogRequest) returns (HiveCatalog) { + option (google.api.http) = { + post: "/hive/v1beta/{parent=projects/*}/catalogs" + body: "hive_catalog" + }; + option (google.api.method_signature) = + "parent,hive_catalog,hive_catalog_id"; + } + + // Gets the catalog specified by the resource name. + rpc GetHiveCatalog(GetHiveCatalogRequest) returns (HiveCatalog) { + option (google.api.http) = { + get: "/hive/v1beta/{name=projects/*/catalogs/*}" + }; + option (google.api.method_signature) = "name"; + } + + // List all catalogs in a specified project. + rpc ListHiveCatalogs(ListHiveCatalogsRequest) + returns (ListHiveCatalogsResponse) { + option (google.api.http) = { + get: "/hive/v1beta/{parent=projects/*}/catalogs" + }; + option (google.api.method_signature) = "parent"; + } + + // Updates an existing catalog. 
+ rpc UpdateHiveCatalog(UpdateHiveCatalogRequest) returns (HiveCatalog) { + option (google.api.http) = { + patch: "/hive/v1beta/{hive_catalog.name=projects/*/catalogs/*}" + body: "hive_catalog" + }; + option (google.api.method_signature) = "hive_catalog,update_mask"; + } + + // Deletes an existing catalog specified by the catalog ID. Delete will fail + // if the catalog is not empty. + rpc DeleteHiveCatalog(DeleteHiveCatalogRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/hive/v1beta/{name=projects/*/catalogs/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Creates a new database. + rpc CreateHiveDatabase(CreateHiveDatabaseRequest) returns (HiveDatabase) { + option (google.api.http) = { + post: "/hive/v1beta/{parent=projects/*/catalogs/*}/databases" + body: "hive_database" + }; + option (google.api.method_signature) = + "parent,hive_database,hive_database_id"; + } + + // Gets the database specified by the resource name. + rpc GetHiveDatabase(GetHiveDatabaseRequest) returns (HiveDatabase) { + option (google.api.http) = { + get: "/hive/v1beta/{name=projects/*/catalogs/*/databases/*}" + }; + option (google.api.method_signature) = "name"; + } + + // List all databases in a specified catalog. + rpc ListHiveDatabases(ListHiveDatabasesRequest) + returns (ListHiveDatabasesResponse) { + option (google.api.http) = { + get: "/hive/v1beta/{parent=projects/*/catalogs/*}/databases" + }; + option (google.api.method_signature) = "parent"; + } + + // Updates an existing database specified by the database name. + rpc UpdateHiveDatabase(UpdateHiveDatabaseRequest) returns (HiveDatabase) { + option (google.api.http) = { + patch: "/hive/v1beta/{hive_database.name=projects/*/catalogs/*/databases/*}" + body: "hive_database" + }; + option (google.api.method_signature) = "hive_database,update_mask"; + } + + // Deletes an existing database specified by the database name. 
+ rpc DeleteHiveDatabase(DeleteHiveDatabaseRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/hive/v1beta/{name=projects/*/catalogs/*/databases/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Creates a new hive table. + rpc CreateHiveTable(CreateHiveTableRequest) returns (HiveTable) { + option (google.api.http) = { + post: "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*}/tables" + body: "hive_table" + }; + option (google.api.method_signature) = "parent,hive_table,hive_table_id"; + } + + // Gets the table specified by the resource name. + rpc GetHiveTable(GetHiveTableRequest) returns (HiveTable) { + option (google.api.http) = { + get: "/hive/v1beta/{name=projects/*/catalogs/*/databases/*/tables/*}" + }; + option (google.api.method_signature) = "name"; + } + + // List all hive tables in a specified project under the hive catalog and + // database. + rpc ListHiveTables(ListHiveTablesRequest) returns (ListHiveTablesResponse) { + option (google.api.http) = { + get: "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*}/tables" + }; + option (google.api.method_signature) = "parent"; + } + + // Updates an existing table specified by the table name. + rpc UpdateHiveTable(UpdateHiveTableRequest) returns (HiveTable) { + option (google.api.http) = { + patch: "/hive/v1beta/{hive_table.name=projects/*/catalogs/*/databases/*/tables/*}" + body: "hive_table" + }; + option (google.api.method_signature) = "hive_table,update_mask"; + } + + // Deletes an existing table specified by the table name. + rpc DeleteHiveTable(DeleteHiveTableRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/hive/v1beta/{name=projects/*/catalogs/*/databases/*/tables/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Adds partitions to a table. 
+ rpc BatchCreatePartitions(BatchCreatePartitionsRequest) + returns (BatchCreatePartitionsResponse) { + option (google.api.http) = { + post: "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*/tables/*}/partitions:batchCreate" + body: "*" + }; + option (google.api.method_signature) = "parent"; + } + + // Deletes partitions from a table. + rpc BatchDeletePartitions(BatchDeletePartitionsRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*/tables/*}/partitions:batchDelete" + body: "*" + }; + option (google.api.method_signature) = "parent"; + } + + // Updates partitions in a table. + rpc BatchUpdatePartitions(BatchUpdatePartitionsRequest) + returns (BatchUpdatePartitionsResponse) { + option (google.api.http) = { + post: "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*/tables/*}/partitions:batchUpdate" + body: "*" + }; + option (google.api.method_signature) = "parent"; + } + + // Streams list of partitions from a table. + rpc ListPartitions(ListPartitionsRequest) + returns (stream ListPartitionsResponse) { + option (google.api.http) = { + get: "/hive/v1beta/{parent=projects/*/catalogs/*/databases/*/tables/*}/partitions:list" + }; + option (google.api.method_signature) = "parent"; + } +} + +// The HiveCatalog contains spark/hive databases and tables in the BigLake +// Metastore. While creating resources under a catalog, ideally ensure that the +// storage bucket location, spark / hive engine location or any other compute +// location match. Catalog can be viewed as the destination for migrating an +// on-prem Hive metastore to GCP. +message HiveCatalog { + option (google.api.resource) = { + type: "biglake.googleapis.com/Catalog" + pattern: "projects/{project}/catalogs/{catalog}" + plural: "catalogs" + singular: "catalog" + }; + + // The replica of the Catalog. 
+ message Replica { + // If the catalog is replicated to multiple regions, this enum describes the + // current state of the replica. + enum State { + // The replica state is unknown. + STATE_UNSPECIFIED = 0; + + // Indicates the replica is the writable primary. + STATE_PRIMARY = 1; + + // Indicates the replica has been recently assigned as the primary, but + // not all databases are writeable yet. + STATE_PRIMARY_IN_PROGRESS = 2; + + // Indicates the replica is a read-only secondary replica. + STATE_SECONDARY = 3; + } + + // Output only. The region of the replica. For example `us-east1`. + string region = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The current state of the replica. + State state = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Output only. The resource name. + // Format: + // projects/{project_id_or_number}/catalogs/{catalog_id} + string name = 1 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.resource_reference) = { type: "biglake.googleapis.com/Catalog" } + ]; + + // Optional. Stores the catalog description. + // The maximum length is 4000 characters. + string description = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Required. The Cloud Storage location path where the catalog exists. + // Format: gs://bucket/path/to/catalog + // The maximum length is 4000 characters. + string location_uri = 3 [(google.api.field_behavior) = REQUIRED]; + + // Output only. The replicas for the catalog metadata. + repeated Replica replicas = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Request message for the CreateHiveCatalog method. +message CreateHiveCatalogRequest { + // Required. The parent resource where this catalog will be created. + // Format: projects/{project_id_or_number} + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Required. The catalog to create. 
+ // The `name` field does not need to be provided. Gets copied over from + // catalog_id. + HiveCatalog hive_catalog = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Hive Catalog ID to use for the catalog that will become the + // final component of the catalog's resource name. The maximum length is 256 + // characters. + string hive_catalog_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The GCP region that specifies where the catalog metadata is + // stored, e.g. us-central1, EU, etc. + string primary_location = 4 + [json_name = "primary_location", (google.api.field_behavior) = REQUIRED]; +} + +// Request message for the GetHiveCatalog method. +message GetHiveCatalogRequest { + // Required. The name of the catalog to retrieve. + // Format: + // projects/{project_id_or_number}/catalogs/{catalog_id} + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "biglake.googleapis.com/Catalog" } + ]; +} + +// Request message for the ListHiveCatalogs method. +message ListHiveCatalogsRequest { + // Required. The project to list catalogs from. + // Format: projects/{project_id_or_number} + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Optional. Page size for pagination. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Page token for pagination. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for the ListHiveCatalogs method. +message ListHiveCatalogsResponse { + // Output only. The catalogs from the specified project. + repeated HiveCatalog catalogs = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A token, which can be sent as `page_token` to retrieve the + // next page. If this field is omitted, there are no subsequent pages. 
+ string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The list of unreachable cloud regions. If non-empty, the + // result set might be incomplete. + repeated string unreachable = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Request message for the UpdateHiveCatalog method. +message UpdateHiveCatalogRequest { + // Required. The hive catalog to update. + // The name under the catalog is used to identify the catalog. + // Format: + // projects/{project_id_or_number}/catalogs/{catalog_id} + HiveCatalog hive_catalog = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The list of fields to update. + // + // For the `FieldMask` definition, see + // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask + // If not set, defaults to all of the fields that are allowed to update. + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Request message for the DeleteHiveCatalog method. +message DeleteHiveCatalogRequest { + // Required. The name of the catalog to delete. + // Format: + // projects/{project_id_or_number}/catalogs/{catalog_id} + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "biglake.googleapis.com/Catalog" } + ]; +} + +// Stores the hive database information. It includes the database name, +// description, location and properties associated with the database. +message HiveDatabase { + option (google.api.resource) = { + type: "biglake.googleapis.com/Namespace" + pattern: "projects/{project}/catalogs/{catalog}/databases/{database}" + plural: "namespaces" + singular: "namespace" + }; + + // Output only. The resource name. 
+  // Format:
+  // projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+  string name = 1 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.resource_reference) = {
+      type: "biglake.googleapis.com/Namespace"
+    }
+  ];
+
+  // Optional. Stores the database description.
+  // The maximum length is 4000 characters.
+  string description = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The Cloud Storage location path where the database exists.
+  // Format: `gs://bucket/path/to/database`
+  // If unspecified, the database will be stored in the catalog location.
+  // The maximum length is 4000 characters.
+  string location_uri = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Stores the properties associated with the database.
+  // The maximum size is 2 MiB.
+  map<string, string> parameters = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request message for the CreateHiveDatabase method.
+message CreateHiveDatabaseRequest {
+  // Required. The parent resource where this database will be created.
+  // Format:
+  // projects/{project_id_or_number}/catalogs/{catalog_id}
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "biglake.googleapis.com/Catalog" }
+  ];
+
+  // Required. The database to create.
+  // The `name` field does not need to be provided.
+  HiveDatabase hive_database = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The ID to use for the Hive Database.
+  // The maximum length is 128 characters.
+  string hive_database_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request message for the GetHiveDatabase method.
+message GetHiveDatabaseRequest {
+  // Required. The name of the database to retrieve.
+ // Format: + // projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "biglake.googleapis.com/Namespace" + } + ]; +} + +// Request message for the ListHiveDatabases method. +message ListHiveDatabasesRequest { + // Required. The hive catalog to list databases from. + // Format: projects/{project_id_or_number}/catalogs/{catalog_id} + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "biglake.googleapis.com/Catalog" } + ]; + + // Optional. Page size for pagination. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. PageToken for pagination. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for the ListHiveDatabases method. +message ListHiveDatabasesResponse { + // Output only. The databases from the specified project and catalog. + repeated HiveDatabase databases = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A token, which can be sent as `page_token` to retrieve the + // next page. If this field is omitted, there are no subsequent pages. + string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Request message for the UpdateHiveDatabase method. +message UpdateHiveDatabaseRequest { + // Required. The database to update. + // + // The database's `name` field is used to identify the database to update. + // Format: + // projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + HiveDatabase hive_database = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The list of fields to update. + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Request message for the DeleteHiveDatabase method. +message DeleteHiveDatabaseRequest { + // Required. The name of the database to delete. 
+  // Format:
+  // projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "biglake.googleapis.com/Namespace"
+    }
+  ];
+}
+
+// Stores the hive table information. It includes the table name, schema (column
+// names and types), data location, storage format, serde info, etc. This
+// message closely matches the Table object in the IMetastoreClient
+message HiveTable {
+  option (google.api.resource) = {
+    type: "biglake.googleapis.com/Table"
+    pattern: "projects/{project}/catalogs/{catalog}/databases/{database}/tables/{table}"
+    plural: "tables"
+    singular: "table"
+  };
+
+  // Output only. The resource name.
+  // Format:
+  // projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+  string name = 1 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.resource_reference) = { type: "biglake.googleapis.com/Table" }
+  ];
+
+  // Optional. Description of the table. The maximum length is 4000 characters.
+  string description = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. Storage descriptor of the table.
+  StorageDescriptor storage_descriptor = 3
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The creation time of the table.
+  google.protobuf.Timestamp create_time = 4
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. The partition keys of the table.
+  repeated FieldSchema partition_keys = 7
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Stores the properties associated with the table. The maximum size
+  // is 4MiB.
+  map<string, string> parameters = 8 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The type of the table. This is EXTERNAL for BigLake hive
+  // tables.
+  string table_type = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Field schema information.
+message FieldSchema {
+  // Required. Name of the field.
The maximum length is 767 characters. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Type of the field. The maximum length is 128 characters. + string type = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Comment of the field. The maximum length is 256 characters. + string comment = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Contains information about the physical storage of the table data. +message StorageDescriptor { + // Sort order of the stored data per column. + message Order { + // Required. The column name. The maximum length is 767 characters. + string col = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Defines the sort order of the column. Ascending if 1, + // descending if 0. + int32 order = 2 [(google.api.field_behavior) = REQUIRED]; + } + + // Stores all the information about skewed table. + message SkewedInfo { + // The skewed column values. + message SkewedColumnValue { + // Required. The skewed column values. The maximum length is 256 + // characters per value. + repeated string values = 1 [(google.api.field_behavior) = REQUIRED]; + } + + // The skewed key values and their corresponding location. + message SkewedKeyValuesLocation { + // Required. The skewed column values. The maximum length is 256 + // characters per value. + repeated string values = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The location of the skewed column values. The maximum length + // is 4000 characters. + string location = 2 [(google.api.field_behavior) = REQUIRED]; + } + + // Required. The column names that are skewed. The maximum length is 256 + // characters per column name. + repeated string skewed_col_names = 1 + [(google.api.field_behavior) = REQUIRED]; + + // Required. The skewed column values. + repeated SkewedColumnValue skewed_col_values = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Required. The skewed key values locations. 
+    repeated SkewedKeyValuesLocation skewed_key_values_locations = 3
+        [(google.api.field_behavior) = REQUIRED];
+  }
+
+  // Required. Specifies the columns of the table.
+  repeated FieldSchema columns = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The Cloud storage uri where the table is located.
+  // Defaults to `/`. The maximum length is
+  // 4000 characters.
+  string location_uri = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The fully qualified Java class name of the input format. The
+  // maximum length is 4000 characters.
+  string input_format = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The fully qualified Java class name of the output format. The
+  // maximum length is 4000 characters.
+  string output_format = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Whether the table is compressed.
+  optional bool compressed = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The number of buckets in the table.
+  optional int32 num_buckets = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Serialization and deserialization information.
+  SerdeInfo serde_info = 7 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Reducer grouping columns and clustering columns and bucketing
+  // columns
+  repeated string bucket_cols = 8 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Sort order of the data in each bucket
+  repeated Order sort_cols = 9 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Key-value pairs for the storage descriptor. The maximum size is
+  // 10Kib.
+  map<string, string> parameters = 10 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Table data skew information.
+  SkewedInfo skewed_info = 11 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Whether the table is stored as sub directories.
+  optional bool stored_as_sub_dirs = 12
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Serialization and deserialization information.
+message SerdeInfo {
+  // The serde types.
+  enum SerdeType {
+    // The serde type is not specified.
+    SERDE_TYPE_UNSPECIFIED = 0;
+
+    // Hive.
+    HIVE = 1;
+
+    // Schema registry.
+    SCHEMA_REGISTRY = 2;
+  }
+
+  // Required. Name of the SerDe. Table name by default. The maximum length is
+  // 128 characters.
+  string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The fully qualified Java class name of the serialization library.
+  // The maximum length is 4000 characters.
+  string serialization_lib = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Description of the serde. The maximum length is 4000 characters.
+  string description = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Parameters of the serde. The maximum size is 10Kib.
+  map<string, string> parameters = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The fully qualified Java class name of the serializer. The
+  // maximum length is 4000 characters.
+  string serializer_class = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The fully qualified Java class name of the deserializer. The
+  // maximum length is 4000 characters.
+  string deserializer_class = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The serde type.
+  SerdeType serde_type = 7 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request message for the CreateHiveTable method.
+message CreateHiveTableRequest {
+  // Required. The parent resource for the table to be created.
+  // Format:
+  // projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "biglake.googleapis.com/Namespace"
+    }
+  ];
+
+  // Required. The Hive Table to create.
+  // The `name` field does not need to be provided.
+  HiveTable hive_table = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required.
The Hive Table ID to use for the table that will become the final + // component of the table's resource name. The maximum length is 256 + // characters. + string hive_table_id = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for the GetHiveTable method. +message GetHiveTableRequest { + // Required. The name of the table to retrieve. + // Format: + // projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "biglake.googleapis.com/Table" } + ]; +} + +// Request message for the ListHiveTables method. +message ListHiveTablesRequest { + // Required. The database to list tables from. + // Format: + // projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id} + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "biglake.googleapis.com/Namespace" + } + ]; + + // Optional. Page size for pagination. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. PageToken for pagination. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for the ListHiveTables method. +message ListHiveTablesResponse { + // Output only. The tables from the specified project, catalog and database. + repeated HiveTable tables = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A token, which can be sent as `page_token` to retrieve the + // next page. If this field is omitted, there are no subsequent pages. + string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Request message for the UpdateHiveTable method. +message UpdateHiveTableRequest { + // Required. The table to update. + // + // The table's `name` field is used to identify the table to update. 
+  // Format:
+  // projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+  HiveTable hive_table = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The list of fields to update.
+  google.protobuf.FieldMask update_mask = 2
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request message for the DeleteHiveTable method.
+message DeleteHiveTableRequest {
+  // Required. The name of the table to delete.
+  // Format:
+  // projects/{project_id_or_number}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "biglake.googleapis.com/Table" }
+  ];
+}
+
+// Information about a Hive partition.
+message Partition {
+  // Required. Represents the values of the partition keys, where each value
+  // corresponds to a specific partition key in the order in which the keys are
+  // defined. Each value is limited to 1024 characters.
+  repeated string values = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The creation time of the partition.
+  google.protobuf.Timestamp create_time = 2
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Contains information about the physical storage of the data in
+  // the partition.
+  StorageDescriptor storage_descriptor = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Additional parameters or metadata associated with the partition.
+  // Maximum size 10 KiB.
+  map<string, string> parameters = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. List of columns.
+  repeated FieldSchema fields = 5 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Represents the values of a partition.
+message PartitionValues {
+  // Required. The values of the partition keys, where each value corresponds to
+  // a specific partition key in the order in which the keys are defined.
+ repeated string values = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for CreatePartition. The Partition is +// uniquely identified by values, which is an ordered list. Hence, there is no +// separate name or partition id field. +message CreatePartitionRequest { + // Required. Reference to the table to where the partition to be added, in the + // format of + // projects/{project}/catalogs/{catalogs}/databases/{databases}/tables/{table}. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "biglake.googleapis.com/Table" } + ]; + + // Required. The partition to be added. + Partition partition = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for the BatchCreatePartitions method. +message BatchCreatePartitionsRequest { + // Required. Reference to the table to where the partitions to be added, in + // the format of + // projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "biglake.googleapis.com/Table" } + ]; + + // Required. Requests to add partitions to the table. + repeated CreatePartitionRequest requests = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. Corresponds to the `ifNotExists` flag in the Hive Metastore APIs. + // If the flag is set to false, the server will return ALREADY_EXISTS if any + // partition already exists. If the flag is set to true, the server will skip + // existing partitions and insert only the non-existing partitions. + // A maximum of 900 partitions can be inserted in a batch. + bool skip_existing_partitions = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for BatchCreatePartitions. +message BatchCreatePartitionsResponse { + // The list of partitions that have been added. + repeated Partition partitions = 1; +} + +// Request message for BatchDeletePartitions. 
The Partition is +// uniquely identified by values, which is an ordered list. Hence, there is no +// separate name or partition id field. +message BatchDeletePartitionsRequest { + // Required. Reference to the table to which these partitions belong, in the + // format of + // projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "biglake.googleapis.com/Table" } + ]; + + // Required. The list of partitions (identified by its values) to be deleted. + // A maximum of 900 partitions can be deleted in a batch. + repeated PartitionValues partition_values = 2 + [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for UpdatePartition. +message UpdatePartitionRequest { + // Required. The partition to be updated. + Partition partition = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The list of fields to update. + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Request message for BatchUpdatePartitions. +message BatchUpdatePartitionsRequest { + // Required. Reference to the table to which these partitions belong, in the + // format of + // projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "biglake.googleapis.com/Table" } + ]; + + // Required. Requests to update partitions in the table. + repeated UpdatePartitionRequest requests = 2 + [(google.api.field_behavior) = REQUIRED]; +} + +// Response message for BatchUpdatePartitions. +message BatchUpdatePartitionsResponse { + // The list of partitions that have been updated. + // A maximum of 900 partitions can be updated in a batch. + repeated Partition partitions = 1; +} + +// Request message for ListPartitions. +message ListPartitionsRequest { + // Required. 
Reference to the table to which these partitions belong, in the + // format of + // projects/{project}/catalogs/{catalogs}/databases/{database}/tables/{table}. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "biglake.googleapis.com/Table" } + ]; + + // Optional. SQL text filtering statement, similar to a `WHERE` clause in a + // query. Only supports single-row expressions. Aggregate functions are not + // supported. + // + // Examples: + // * `"int_field > 5"` + // * `"date_field = CAST('2014-9-27' as DATE)"` + // * `"nullable_field is not NULL"` + // * `"st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"` + // * `"numeric_field BETWEEN 1.0 AND 5.0"` + // + // Restricted to a maximum length of 1 MB. + string filter = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for ListPartitions. +message ListPartitionsResponse { + // Output only. List of partitions. + repeated Partition partitions = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/AsyncBatchCreatePartitions.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/AsyncBatchCreatePartitions.java new file mode 100644 index 000000000000..87dde12d36f7 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/AsyncBatchCreatePartitions.java @@ -0,0 +1,55 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_BatchCreatePartitions_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; +import java.util.ArrayList; + +public class AsyncBatchCreatePartitions { + + public static void main(String[] args) throws Exception { + asyncBatchCreatePartitions(); + } + + public static void asyncBatchCreatePartitions() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+    // - It may require specifying regional endpoints when creating the service client as shown in
+    // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+    try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+        HiveMetastoreServiceClient.create()) {
+      BatchCreatePartitionsRequest request =
+          BatchCreatePartitionsRequest.newBuilder()
+              .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+              .addAllRequests(new ArrayList<CreatePartitionRequest>())
+              .setSkipExistingPartitions(true)
+              .build();
+      ApiFuture<BatchCreatePartitionsResponse> future =
+          hiveMetastoreServiceClient.batchCreatePartitionsCallable().futureCall(request);
+      // Do something.
+      BatchCreatePartitionsResponse response = future.get();
+    }
+  }
+}
+// [END biglake_v1beta_generated_HiveMetastoreService_BatchCreatePartitions_async]
diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/SyncBatchCreatePartitions.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/SyncBatchCreatePartitions.java
new file mode 100644
index 000000000000..23a643400402
--- /dev/null
+++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/SyncBatchCreatePartitions.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.biglake.hive.v1beta.samples;
+
+// [START biglake_v1beta_generated_HiveMetastoreService_BatchCreatePartitions_sync]
+import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsRequest;
+import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse;
+import com.google.cloud.biglake.hive.v1beta.CreatePartitionRequest;
+import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient;
+import com.google.cloud.biglake.hive.v1beta.TableName;
+import java.util.ArrayList;
+
+public class SyncBatchCreatePartitions {
+
+  public static void main(String[] args) throws Exception {
+    syncBatchCreatePartitions();
+  }
+
+  public static void syncBatchCreatePartitions() throws Exception {
+    // This snippet has been automatically generated and should be regarded as a code template only.
+    // It will require modifications to work:
+    // - It may require correct/in-range values for request initialization.
+    // - It may require specifying regional endpoints when creating the service client as shown in
+    // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+    try (HiveMetastoreServiceClient hiveMetastoreServiceClient =
+        HiveMetastoreServiceClient.create()) {
+      BatchCreatePartitionsRequest request =
+          BatchCreatePartitionsRequest.newBuilder()
+              .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString())
+              .addAllRequests(new ArrayList<CreatePartitionRequest>())
+              .setSkipExistingPartitions(true)
+              .build();
+      BatchCreatePartitionsResponse response =
+          hiveMetastoreServiceClient.batchCreatePartitions(request);
+    }
+  }
+}
+// [END biglake_v1beta_generated_HiveMetastoreService_BatchCreatePartitions_sync]
diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/SyncBatchCreatePartitionsString.java
b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/SyncBatchCreatePartitionsString.java new file mode 100644 index 000000000000..fee3662453eb --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/SyncBatchCreatePartitionsString.java @@ -0,0 +1,44 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_BatchCreatePartitions_String_sync] +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; + +public class SyncBatchCreatePartitionsString { + + public static void main(String[] args) throws Exception { + syncBatchCreatePartitionsString(); + } + + public static void syncBatchCreatePartitionsString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString(); + BatchCreatePartitionsResponse response = + hiveMetastoreServiceClient.batchCreatePartitions(parent); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_BatchCreatePartitions_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/SyncBatchCreatePartitionsTablename.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/SyncBatchCreatePartitionsTablename.java new file mode 100644 index 000000000000..1f36478d7f7e --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchcreatepartitions/SyncBatchCreatePartitionsTablename.java @@ -0,0 +1,44 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_BatchCreatePartitions_Tablename_sync] +import com.google.cloud.biglake.hive.v1beta.BatchCreatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; + +public class SyncBatchCreatePartitionsTablename { + + public static void main(String[] args) throws Exception { + syncBatchCreatePartitionsTablename(); + } + + public static void syncBatchCreatePartitionsTablename() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + BatchCreatePartitionsResponse response = + hiveMetastoreServiceClient.batchCreatePartitions(parent); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_BatchCreatePartitions_Tablename_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/AsyncBatchDeletePartitions.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/AsyncBatchDeletePartitions.java new file mode 100644 index 000000000000..70bb4721cc6d --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/AsyncBatchDeletePartitions.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + 
* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_BatchDeletePartitions_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.PartitionValues; +import com.google.cloud.biglake.hive.v1beta.TableName; +import com.google.protobuf.Empty; +import java.util.ArrayList; + +public class AsyncBatchDeletePartitions { + + public static void main(String[] args) throws Exception { + asyncBatchDeletePartitions(); + } + + public static void asyncBatchDeletePartitions() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + BatchDeletePartitionsRequest request = + BatchDeletePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .addAllPartitionValues(new ArrayList()) + .build(); + ApiFuture future = + hiveMetastoreServiceClient.batchDeletePartitionsCallable().futureCall(request); + // Do something. + future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_BatchDeletePartitions_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/SyncBatchDeletePartitions.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/SyncBatchDeletePartitions.java new file mode 100644 index 000000000000..4901f14d93b7 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/SyncBatchDeletePartitions.java @@ -0,0 +1,50 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_BatchDeletePartitions_sync] +import com.google.cloud.biglake.hive.v1beta.BatchDeletePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.PartitionValues; +import com.google.cloud.biglake.hive.v1beta.TableName; +import com.google.protobuf.Empty; +import java.util.ArrayList; + +public class SyncBatchDeletePartitions { + + public static void main(String[] args) throws Exception { + syncBatchDeletePartitions(); + } + + public static void syncBatchDeletePartitions() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + BatchDeletePartitionsRequest request = + BatchDeletePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .addAllPartitionValues(new ArrayList()) + .build(); + hiveMetastoreServiceClient.batchDeletePartitions(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_BatchDeletePartitions_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/SyncBatchDeletePartitionsString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/SyncBatchDeletePartitionsString.java new file mode 100644 index 000000000000..92e2e293f563 --- /dev/null +++ 
b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/SyncBatchDeletePartitionsString.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_BatchDeletePartitions_String_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; +import com.google.protobuf.Empty; + +public class SyncBatchDeletePartitionsString { + + public static void main(String[] args) throws Exception { + syncBatchDeletePartitionsString(); + } + + public static void syncBatchDeletePartitionsString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString(); + hiveMetastoreServiceClient.batchDeletePartitions(parent); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_BatchDeletePartitions_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/SyncBatchDeletePartitionsTablename.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/SyncBatchDeletePartitionsTablename.java new file mode 100644 index 000000000000..65c610d1dec1 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchdeletepartitions/SyncBatchDeletePartitionsTablename.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_BatchDeletePartitions_Tablename_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; +import com.google.protobuf.Empty; + +public class SyncBatchDeletePartitionsTablename { + + public static void main(String[] args) throws Exception { + syncBatchDeletePartitionsTablename(); + } + + public static void syncBatchDeletePartitionsTablename() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + hiveMetastoreServiceClient.batchDeletePartitions(parent); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_BatchDeletePartitions_Tablename_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/AsyncBatchUpdatePartitions.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/AsyncBatchUpdatePartitions.java new file mode 100644 index 000000000000..3c72ebd71797 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/AsyncBatchUpdatePartitions.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may 
not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_BatchUpdatePartitions_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; +import com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest; +import java.util.ArrayList; + +public class AsyncBatchUpdatePartitions { + + public static void main(String[] args) throws Exception { + asyncBatchUpdatePartitions(); + } + + public static void asyncBatchUpdatePartitions() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + BatchUpdatePartitionsRequest request = + BatchUpdatePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .addAllRequests(new ArrayList()) + .build(); + ApiFuture future = + hiveMetastoreServiceClient.batchUpdatePartitionsCallable().futureCall(request); + // Do something. + BatchUpdatePartitionsResponse response = future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_BatchUpdatePartitions_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/SyncBatchUpdatePartitions.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/SyncBatchUpdatePartitions.java new file mode 100644 index 000000000000..9d647d7e7362 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/SyncBatchUpdatePartitions.java @@ -0,0 +1,51 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_BatchUpdatePartitions_sync] +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; +import com.google.cloud.biglake.hive.v1beta.UpdatePartitionRequest; +import java.util.ArrayList; + +public class SyncBatchUpdatePartitions { + + public static void main(String[] args) throws Exception { + syncBatchUpdatePartitions(); + } + + public static void syncBatchUpdatePartitions() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + BatchUpdatePartitionsRequest request = + BatchUpdatePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .addAllRequests(new ArrayList()) + .build(); + BatchUpdatePartitionsResponse response = + hiveMetastoreServiceClient.batchUpdatePartitions(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_BatchUpdatePartitions_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/SyncBatchUpdatePartitionsString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/SyncBatchUpdatePartitionsString.java new 
file mode 100644 index 000000000000..74387315c08f --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/SyncBatchUpdatePartitionsString.java @@ -0,0 +1,44 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_BatchUpdatePartitions_String_sync] +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; + +public class SyncBatchUpdatePartitionsString { + + public static void main(String[] args) throws Exception { + syncBatchUpdatePartitionsString(); + } + + public static void syncBatchUpdatePartitionsString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString(); + BatchUpdatePartitionsResponse response = + hiveMetastoreServiceClient.batchUpdatePartitions(parent); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_BatchUpdatePartitions_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/SyncBatchUpdatePartitionsTablename.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/SyncBatchUpdatePartitionsTablename.java new file mode 100644 index 000000000000..fa62d41ec202 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/batchupdatepartitions/SyncBatchUpdatePartitionsTablename.java @@ -0,0 +1,44 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_BatchUpdatePartitions_Tablename_sync] +import com.google.cloud.biglake.hive.v1beta.BatchUpdatePartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; + +public class SyncBatchUpdatePartitionsTablename { + + public static void main(String[] args) throws Exception { + syncBatchUpdatePartitionsTablename(); + } + + public static void syncBatchUpdatePartitionsTablename() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + TableName parent = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + BatchUpdatePartitionsResponse response = + hiveMetastoreServiceClient.batchUpdatePartitions(parent); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_BatchUpdatePartitions_Tablename_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/create/SyncCreateSetCredentialsProvider.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/create/SyncCreateSetCredentialsProvider.java new file mode 100644 index 000000000000..af410fc4e9d5 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/create/SyncCreateSetCredentialsProvider.java @@ -0,0 +1,45 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the 
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_Create_SetCredentialsProvider_sync] +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceSettings; +import com.google.cloud.biglake.hive.v1beta.myCredentials; + +public class SyncCreateSetCredentialsProvider { + + public static void main(String[] args) throws Exception { + syncCreateSetCredentialsProvider(); + } + + public static void syncCreateSetCredentialsProvider() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + HiveMetastoreServiceSettings hiveMetastoreServiceSettings = + HiveMetastoreServiceSettings.newBuilder() + .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials)) + .build(); + HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create(hiveMetastoreServiceSettings); + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_Create_SetCredentialsProvider_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/create/SyncCreateSetEndpoint.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/create/SyncCreateSetEndpoint.java new file mode 100644 index 000000000000..c8b4a5070394 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/create/SyncCreateSetEndpoint.java @@ -0,0 +1,42 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_Create_SetEndpoint_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceSettings; +import com.google.cloud.biglake.hive.v1beta.myEndpoint; + +public class SyncCreateSetEndpoint { + + public static void main(String[] args) throws Exception { + syncCreateSetEndpoint(); + } + + public static void syncCreateSetEndpoint() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + HiveMetastoreServiceSettings hiveMetastoreServiceSettings = + HiveMetastoreServiceSettings.newBuilder().setEndpoint(myEndpoint).build(); + HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create(hiveMetastoreServiceSettings); + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_Create_SetEndpoint_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/create/SyncCreateUseHttpJsonTransport.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/create/SyncCreateUseHttpJsonTransport.java new file mode 100644 index 000000000000..4bd4efb82e7d --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/create/SyncCreateUseHttpJsonTransport.java @@ -0,0 +1,41 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_Create_UseHttpJsonTransport_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceSettings; + +public class SyncCreateUseHttpJsonTransport { + + public static void main(String[] args) throws Exception { + syncCreateUseHttpJsonTransport(); + } + + public static void syncCreateUseHttpJsonTransport() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + HiveMetastoreServiceSettings hiveMetastoreServiceSettings = + HiveMetastoreServiceSettings.newHttpJsonBuilder().build(); + HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create(hiveMetastoreServiceSettings); + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_Create_UseHttpJsonTransport_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/AsyncCreateHiveCatalog.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/AsyncCreateHiveCatalog.java new file mode 100644 index 000000000000..26e0da4c4392 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/AsyncCreateHiveCatalog.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveCatalog_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ProjectName; + +public class AsyncCreateHiveCatalog { + + public static void main(String[] args) throws Exception { + asyncCreateHiveCatalog(); + } + + public static void asyncCreateHiveCatalog() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + CreateHiveCatalogRequest request = + CreateHiveCatalogRequest.newBuilder() + .setParent(ProjectName.of("[PROJECT]").toString()) + .setHiveCatalog(HiveCatalog.newBuilder().build()) + .setHiveCatalogId("hiveCatalogId-575314556") + .setPrimaryLocation("primaryLocation-1140723753") + .build(); + ApiFuture<HiveCatalog> future = + hiveMetastoreServiceClient.createHiveCatalogCallable().futureCall(request); + // Do something.
+ HiveCatalog response = future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveCatalog_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/SyncCreateHiveCatalog.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/SyncCreateHiveCatalog.java new file mode 100644 index 000000000000..abb3a88b1492 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/SyncCreateHiveCatalog.java @@ -0,0 +1,50 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveCatalog_sync] +import com.google.cloud.biglake.hive.v1beta.CreateHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ProjectName; + +public class SyncCreateHiveCatalog { + + public static void main(String[] args) throws Exception { + syncCreateHiveCatalog(); + } + + public static void syncCreateHiveCatalog() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + CreateHiveCatalogRequest request = + CreateHiveCatalogRequest.newBuilder() + .setParent(ProjectName.of("[PROJECT]").toString()) + .setHiveCatalog(HiveCatalog.newBuilder().build()) + .setHiveCatalogId("hiveCatalogId-575314556") + .setPrimaryLocation("primaryLocation-1140723753") + .build(); + HiveCatalog response = hiveMetastoreServiceClient.createHiveCatalog(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveCatalog_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/SyncCreateHiveCatalogProjectnameHivecatalogString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/SyncCreateHiveCatalogProjectnameHivecatalogString.java new file mode 100644 index 000000000000..0a265f36813b --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/SyncCreateHiveCatalogProjectnameHivecatalogString.java @@ -0,0 +1,46 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveCatalog_ProjectnameHivecatalogString_sync] +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ProjectName; + +public class SyncCreateHiveCatalogProjectnameHivecatalogString { + + public static void main(String[] args) throws Exception { + syncCreateHiveCatalogProjectnameHivecatalogString(); + } + + public static void syncCreateHiveCatalogProjectnameHivecatalogString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ProjectName parent = ProjectName.of("[PROJECT]"); + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + String hiveCatalogId = "hiveCatalogId-575314556"; + HiveCatalog response = + hiveMetastoreServiceClient.createHiveCatalog(parent, hiveCatalog, hiveCatalogId); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveCatalog_ProjectnameHivecatalogString_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/SyncCreateHiveCatalogStringHivecatalogString.java 
b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/SyncCreateHiveCatalogStringHivecatalogString.java new file mode 100644 index 000000000000..3c9f8c3666d4 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivecatalog/SyncCreateHiveCatalogStringHivecatalogString.java @@ -0,0 +1,46 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveCatalog_StringHivecatalogString_sync] +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ProjectName; + +public class SyncCreateHiveCatalogStringHivecatalogString { + + public static void main(String[] args) throws Exception { + syncCreateHiveCatalogStringHivecatalogString(); + } + + public static void syncCreateHiveCatalogStringHivecatalogString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String parent = ProjectName.of("[PROJECT]").toString(); + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + String hiveCatalogId = "hiveCatalogId-575314556"; + HiveCatalog response = + hiveMetastoreServiceClient.createHiveCatalog(parent, hiveCatalog, hiveCatalogId); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveCatalog_StringHivecatalogString_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/AsyncCreateHiveDatabase.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/AsyncCreateHiveDatabase.java new file mode 100644 index 000000000000..88ec3a21d280 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/AsyncCreateHiveDatabase.java @@ -0,0 +1,53 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveDatabase_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; + +public class AsyncCreateHiveDatabase { + + public static void main(String[] args) throws Exception { + asyncCreateHiveDatabase(); + } + + public static void asyncCreateHiveDatabase() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + CreateHiveDatabaseRequest request = + CreateHiveDatabaseRequest.newBuilder() + .setParent(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setHiveDatabase(HiveDatabase.newBuilder().build()) + .setHiveDatabaseId("hiveDatabaseId-1150232698") + .build(); + ApiFuture<HiveDatabase> future = + hiveMetastoreServiceClient.createHiveDatabaseCallable().futureCall(request); + // Do something.
+ HiveDatabase response = future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveDatabase_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/SyncCreateHiveDatabase.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/SyncCreateHiveDatabase.java new file mode 100644 index 000000000000..1cfcae116507 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/SyncCreateHiveDatabase.java @@ -0,0 +1,49 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveDatabase_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.CreateHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; + +public class SyncCreateHiveDatabase { + + public static void main(String[] args) throws Exception { + syncCreateHiveDatabase(); + } + + public static void syncCreateHiveDatabase() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + CreateHiveDatabaseRequest request = + CreateHiveDatabaseRequest.newBuilder() + .setParent(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setHiveDatabase(HiveDatabase.newBuilder().build()) + .setHiveDatabaseId("hiveDatabaseId-1150232698") + .build(); + HiveDatabase response = hiveMetastoreServiceClient.createHiveDatabase(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveDatabase_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/SyncCreateHiveDatabaseCatalognameHivedatabaseString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/SyncCreateHiveDatabaseCatalognameHivedatabaseString.java new file mode 100644 index 000000000000..73b36ebd3857 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/SyncCreateHiveDatabaseCatalognameHivedatabaseString.java @@ -0,0 +1,46 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveDatabase_CatalognameHivedatabaseString_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; + +public class SyncCreateHiveDatabaseCatalognameHivedatabaseString { + + public static void main(String[] args) throws Exception { + syncCreateHiveDatabaseCatalognameHivedatabaseString(); + } + + public static void syncCreateHiveDatabaseCatalognameHivedatabaseString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]"); + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + String hiveDatabaseId = "hiveDatabaseId-1150232698"; + HiveDatabase response = + hiveMetastoreServiceClient.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveDatabase_CatalognameHivedatabaseString_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/SyncCreateHiveDatabaseStringHivedatabaseString.java 
b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/SyncCreateHiveDatabaseStringHivedatabaseString.java new file mode 100644 index 000000000000..086922fcfee7 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivedatabase/SyncCreateHiveDatabaseStringHivedatabaseString.java @@ -0,0 +1,46 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveDatabase_StringHivedatabaseString_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; + +public class SyncCreateHiveDatabaseStringHivedatabaseString { + + public static void main(String[] args) throws Exception { + syncCreateHiveDatabaseStringHivedatabaseString(); + } + + public static void syncCreateHiveDatabaseStringHivedatabaseString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String parent = CatalogName.of("[PROJECT]", "[CATALOG]").toString(); + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + String hiveDatabaseId = "hiveDatabaseId-1150232698"; + HiveDatabase response = + hiveMetastoreServiceClient.createHiveDatabase(parent, hiveDatabase, hiveDatabaseId); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveDatabase_StringHivedatabaseString_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/AsyncCreateHiveTable.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/AsyncCreateHiveTable.java new file mode 100644 index 000000000000..083a083ca65f --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/AsyncCreateHiveTable.java @@ -0,0 +1,53 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveTable_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class AsyncCreateHiveTable { + + public static void main(String[] args) throws Exception { + asyncCreateHiveTable(); + } + + public static void asyncCreateHiveTable() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + CreateHiveTableRequest request = + CreateHiveTableRequest.newBuilder() + .setParent(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setHiveTable(HiveTable.newBuilder().build()) + .setHiveTableId("hiveTableId152241145") + .build(); + ApiFuture<HiveTable> future = + hiveMetastoreServiceClient.createHiveTableCallable().futureCall(request); + // Do something.
+ HiveTable response = future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveTable_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/SyncCreateHiveTable.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/SyncCreateHiveTable.java new file mode 100644 index 000000000000..07c76beb5ff7 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/SyncCreateHiveTable.java @@ -0,0 +1,49 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveTable_sync] +import com.google.cloud.biglake.hive.v1beta.CreateHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class SyncCreateHiveTable { + + public static void main(String[] args) throws Exception { + syncCreateHiveTable(); + } + + public static void syncCreateHiveTable() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + CreateHiveTableRequest request = + CreateHiveTableRequest.newBuilder() + .setParent(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setHiveTable(HiveTable.newBuilder().build()) + .setHiveTableId("hiveTableId152241145") + .build(); + HiveTable response = hiveMetastoreServiceClient.createHiveTable(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveTable_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/SyncCreateHiveTableNamespacenameHivetableString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/SyncCreateHiveTableNamespacenameHivetableString.java new file mode 100644 index 000000000000..4fc0cc769506 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/SyncCreateHiveTableNamespacenameHivetableString.java @@ -0,0 +1,46 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveTable_NamespacenameHivetableString_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class SyncCreateHiveTableNamespacenameHivetableString { + + public static void main(String[] args) throws Exception { + syncCreateHiveTableNamespacenameHivetableString(); + } + + public static void syncCreateHiveTableNamespacenameHivetableString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + HiveTable hiveTable = HiveTable.newBuilder().build(); + String hiveTableId = "hiveTableId152241145"; + HiveTable response = + hiveMetastoreServiceClient.createHiveTable(parent, hiveTable, hiveTableId); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveTable_NamespacenameHivetableString_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/SyncCreateHiveTableStringHivetableString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/SyncCreateHiveTableStringHivetableString.java new file 
mode 100644 index 000000000000..2531386b7e19 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/createhivetable/SyncCreateHiveTableStringHivetableString.java @@ -0,0 +1,46 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_CreateHiveTable_StringHivetableString_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class SyncCreateHiveTableStringHivetableString { + + public static void main(String[] args) throws Exception { + syncCreateHiveTableStringHivetableString(); + } + + public static void syncCreateHiveTableStringHivetableString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString(); + HiveTable hiveTable = HiveTable.newBuilder().build(); + String hiveTableId = "hiveTableId152241145"; + HiveTable response = + hiveMetastoreServiceClient.createHiveTable(parent, hiveTable, hiveTableId); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_CreateHiveTable_StringHivetableString_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/AsyncDeleteHiveCatalog.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/AsyncDeleteHiveCatalog.java new file mode 100644 index 000000000000..64b14ed1f37d --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/AsyncDeleteHiveCatalog.java @@ -0,0 +1,51 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveCatalog_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.protobuf.Empty; + +public class AsyncDeleteHiveCatalog { + + public static void main(String[] args) throws Exception { + asyncDeleteHiveCatalog(); + } + + public static void asyncDeleteHiveCatalog() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + DeleteHiveCatalogRequest request = + DeleteHiveCatalogRequest.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .build(); + ApiFuture future = + hiveMetastoreServiceClient.deleteHiveCatalogCallable().futureCall(request); + // Do something. 
+ future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveCatalog_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/SyncDeleteHiveCatalog.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/SyncDeleteHiveCatalog.java new file mode 100644 index 000000000000..718813820eca --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/SyncDeleteHiveCatalog.java @@ -0,0 +1,47 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveCatalog_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.protobuf.Empty; + +public class SyncDeleteHiveCatalog { + + public static void main(String[] args) throws Exception { + syncDeleteHiveCatalog(); + } + + public static void syncDeleteHiveCatalog() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + DeleteHiveCatalogRequest request = + DeleteHiveCatalogRequest.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .build(); + hiveMetastoreServiceClient.deleteHiveCatalog(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveCatalog_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/SyncDeleteHiveCatalogCatalogname.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/SyncDeleteHiveCatalogCatalogname.java new file mode 100644 index 000000000000..b250b7c20f37 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/SyncDeleteHiveCatalogCatalogname.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveCatalog_Catalogname_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.protobuf.Empty; + +public class SyncDeleteHiveCatalogCatalogname { + + public static void main(String[] args) throws Exception { + syncDeleteHiveCatalogCatalogname(); + } + + public static void syncDeleteHiveCatalogCatalogname() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]"); + hiveMetastoreServiceClient.deleteHiveCatalog(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveCatalog_Catalogname_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/SyncDeleteHiveCatalogString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/SyncDeleteHiveCatalogString.java new file mode 100644 index 000000000000..b741975906aa --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivecatalog/SyncDeleteHiveCatalogString.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveCatalog_String_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.protobuf.Empty; + +public class SyncDeleteHiveCatalogString { + + public static void main(String[] args) throws Exception { + syncDeleteHiveCatalogString(); + } + + public static void syncDeleteHiveCatalogString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String name = CatalogName.of("[PROJECT]", "[CATALOG]").toString(); + hiveMetastoreServiceClient.deleteHiveCatalog(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveCatalog_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/AsyncDeleteHiveDatabase.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/AsyncDeleteHiveDatabase.java new file mode 100644 index 000000000000..a39b1acd04ba --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/AsyncDeleteHiveDatabase.java @@ -0,0 +1,51 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveDatabase_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; +import com.google.protobuf.Empty; + +public class AsyncDeleteHiveDatabase { + + public static void main(String[] args) throws Exception { + asyncDeleteHiveDatabase(); + } + + public static void asyncDeleteHiveDatabase() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + DeleteHiveDatabaseRequest request = + DeleteHiveDatabaseRequest.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .build(); + ApiFuture future = + hiveMetastoreServiceClient.deleteHiveDatabaseCallable().futureCall(request); + // Do something. 
+ future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveDatabase_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/SyncDeleteHiveDatabase.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/SyncDeleteHiveDatabase.java new file mode 100644 index 000000000000..48b50ad33a13 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/SyncDeleteHiveDatabase.java @@ -0,0 +1,47 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveDatabase_sync] +import com.google.cloud.biglake.hive.v1beta.DeleteHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; +import com.google.protobuf.Empty; + +public class SyncDeleteHiveDatabase { + + public static void main(String[] args) throws Exception { + syncDeleteHiveDatabase(); + } + + public static void syncDeleteHiveDatabase() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + DeleteHiveDatabaseRequest request = + DeleteHiveDatabaseRequest.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .build(); + hiveMetastoreServiceClient.deleteHiveDatabase(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveDatabase_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/SyncDeleteHiveDatabaseNamespacename.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/SyncDeleteHiveDatabaseNamespacename.java new file mode 100644 index 000000000000..3d58083126ae --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/SyncDeleteHiveDatabaseNamespacename.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveDatabase_Namespacename_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; +import com.google.protobuf.Empty; + +public class SyncDeleteHiveDatabaseNamespacename { + + public static void main(String[] args) throws Exception { + syncDeleteHiveDatabaseNamespacename(); + } + + public static void syncDeleteHiveDatabaseNamespacename() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + hiveMetastoreServiceClient.deleteHiveDatabase(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveDatabase_Namespacename_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/SyncDeleteHiveDatabaseString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/SyncDeleteHiveDatabaseString.java new file mode 100644 index 000000000000..4e4cc8016244 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivedatabase/SyncDeleteHiveDatabaseString.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not 
use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveDatabase_String_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; +import com.google.protobuf.Empty; + +public class SyncDeleteHiveDatabaseString { + + public static void main(String[] args) throws Exception { + syncDeleteHiveDatabaseString(); + } + + public static void syncDeleteHiveDatabaseString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString(); + hiveMetastoreServiceClient.deleteHiveDatabase(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveDatabase_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/AsyncDeleteHiveTable.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/AsyncDeleteHiveTable.java new file mode 100644 index 000000000000..1db173147856 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/AsyncDeleteHiveTable.java @@ -0,0 +1,51 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveTable_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; +import com.google.protobuf.Empty; + +public class AsyncDeleteHiveTable { + + public static void main(String[] args) throws Exception { + asyncDeleteHiveTable(); + } + + public static void asyncDeleteHiveTable() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + DeleteHiveTableRequest request = + DeleteHiveTableRequest.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .build(); + ApiFuture future = + hiveMetastoreServiceClient.deleteHiveTableCallable().futureCall(request); + // Do something. 
+ future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveTable_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/SyncDeleteHiveTable.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/SyncDeleteHiveTable.java new file mode 100644 index 000000000000..3fea5b7e94f5 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/SyncDeleteHiveTable.java @@ -0,0 +1,47 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveTable_sync] +import com.google.cloud.biglake.hive.v1beta.DeleteHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; +import com.google.protobuf.Empty; + +public class SyncDeleteHiveTable { + + public static void main(String[] args) throws Exception { + syncDeleteHiveTable(); + } + + public static void syncDeleteHiveTable() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + DeleteHiveTableRequest request = + DeleteHiveTableRequest.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .build(); + hiveMetastoreServiceClient.deleteHiveTable(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveTable_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/SyncDeleteHiveTableString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/SyncDeleteHiveTableString.java new file mode 100644 index 000000000000..0421f9eea070 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/SyncDeleteHiveTableString.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveTable_String_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; +import com.google.protobuf.Empty; + +public class SyncDeleteHiveTableString { + + public static void main(String[] args) throws Exception { + syncDeleteHiveTableString(); + } + + public static void syncDeleteHiveTableString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString(); + hiveMetastoreServiceClient.deleteHiveTable(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveTable_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/SyncDeleteHiveTableTablename.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/SyncDeleteHiveTableTablename.java new file mode 100644 index 000000000000..c685bfc740b5 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/deletehivetable/SyncDeleteHiveTableTablename.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_DeleteHiveTable_Tablename_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.TableName; +import com.google.protobuf.Empty; + +public class SyncDeleteHiveTableTablename { + + public static void main(String[] args) throws Exception { + syncDeleteHiveTableTablename(); + } + + public static void syncDeleteHiveTableTablename() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + hiveMetastoreServiceClient.deleteHiveTable(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_DeleteHiveTable_Tablename_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/AsyncGetHiveCatalog.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/AsyncGetHiveCatalog.java new file mode 100644 index 000000000000..539f217f1d47 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/AsyncGetHiveCatalog.java @@ -0,0 +1,51 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveCatalog_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; + +public class AsyncGetHiveCatalog { + + public static void main(String[] args) throws Exception { + asyncGetHiveCatalog(); + } + + public static void asyncGetHiveCatalog() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + GetHiveCatalogRequest request = + GetHiveCatalogRequest.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .build(); + ApiFuture future = + hiveMetastoreServiceClient.getHiveCatalogCallable().futureCall(request); + // Do something. 
+ HiveCatalog response = future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveCatalog_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/SyncGetHiveCatalog.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/SyncGetHiveCatalog.java new file mode 100644 index 000000000000..301fed6f021e --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/SyncGetHiveCatalog.java @@ -0,0 +1,47 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveCatalog_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.GetHiveCatalogRequest; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; + +public class SyncGetHiveCatalog { + + public static void main(String[] args) throws Exception { + syncGetHiveCatalog(); + } + + public static void syncGetHiveCatalog() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + GetHiveCatalogRequest request = + GetHiveCatalogRequest.newBuilder() + .setName(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .build(); + HiveCatalog response = hiveMetastoreServiceClient.getHiveCatalog(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveCatalog_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/SyncGetHiveCatalogCatalogname.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/SyncGetHiveCatalogCatalogname.java new file mode 100644 index 000000000000..e3e65774eaf8 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/SyncGetHiveCatalogCatalogname.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveCatalog_Catalogname_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; + +public class SyncGetHiveCatalogCatalogname { + + public static void main(String[] args) throws Exception { + syncGetHiveCatalogCatalogname(); + } + + public static void syncGetHiveCatalogCatalogname() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + CatalogName name = CatalogName.of("[PROJECT]", "[CATALOG]"); + HiveCatalog response = hiveMetastoreServiceClient.getHiveCatalog(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveCatalog_Catalogname_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/SyncGetHiveCatalogString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/SyncGetHiveCatalogString.java new file mode 100644 index 000000000000..f45cb50ae4ec --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivecatalog/SyncGetHiveCatalogString.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveCatalog_String_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; + +public class SyncGetHiveCatalogString { + + public static void main(String[] args) throws Exception { + syncGetHiveCatalogString(); + } + + public static void syncGetHiveCatalogString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String name = CatalogName.of("[PROJECT]", "[CATALOG]").toString(); + HiveCatalog response = hiveMetastoreServiceClient.getHiveCatalog(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveCatalog_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/AsyncGetHiveDatabase.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/AsyncGetHiveDatabase.java new file mode 100644 index 000000000000..f65dc867ad78 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/AsyncGetHiveDatabase.java @@ -0,0 +1,51 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveDatabase_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class AsyncGetHiveDatabase { + + public static void main(String[] args) throws Exception { + asyncGetHiveDatabase(); + } + + public static void asyncGetHiveDatabase() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + GetHiveDatabaseRequest request = + GetHiveDatabaseRequest.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .build(); + ApiFuture future = + hiveMetastoreServiceClient.getHiveDatabaseCallable().futureCall(request); + // Do something. 
+ HiveDatabase response = future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveDatabase_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/SyncGetHiveDatabase.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/SyncGetHiveDatabase.java new file mode 100644 index 000000000000..02cb4ae76375 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/SyncGetHiveDatabase.java @@ -0,0 +1,47 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveDatabase_sync] +import com.google.cloud.biglake.hive.v1beta.GetHiveDatabaseRequest; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class SyncGetHiveDatabase { + + public static void main(String[] args) throws Exception { + syncGetHiveDatabase(); + } + + public static void syncGetHiveDatabase() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + GetHiveDatabaseRequest request = + GetHiveDatabaseRequest.newBuilder() + .setName(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .build(); + HiveDatabase response = hiveMetastoreServiceClient.getHiveDatabase(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveDatabase_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/SyncGetHiveDatabaseNamespacename.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/SyncGetHiveDatabaseNamespacename.java new file mode 100644 index 000000000000..b518f105cedc --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/SyncGetHiveDatabaseNamespacename.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveDatabase_Namespacename_sync] +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class SyncGetHiveDatabaseNamespacename { + + public static void main(String[] args) throws Exception { + syncGetHiveDatabaseNamespacename(); + } + + public static void syncGetHiveDatabaseNamespacename() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + NamespaceName name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + HiveDatabase response = hiveMetastoreServiceClient.getHiveDatabase(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveDatabase_Namespacename_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/SyncGetHiveDatabaseString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/SyncGetHiveDatabaseString.java new file mode 100644 index 000000000000..bf29023116a7 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivedatabase/SyncGetHiveDatabaseString.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * 
you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveDatabase_String_sync] +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class SyncGetHiveDatabaseString { + + public static void main(String[] args) throws Exception { + syncGetHiveDatabaseString(); + } + + public static void syncGetHiveDatabaseString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String name = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString(); + HiveDatabase response = hiveMetastoreServiceClient.getHiveDatabase(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveDatabase_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/AsyncGetHiveTable.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/AsyncGetHiveTable.java new file mode 100644 index 000000000000..7dbb1bcf0e88 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/AsyncGetHiveTable.java @@ -0,0 +1,51 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveTable_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.TableName; + +public class AsyncGetHiveTable { + + public static void main(String[] args) throws Exception { + asyncGetHiveTable(); + } + + public static void asyncGetHiveTable() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + GetHiveTableRequest request = + GetHiveTableRequest.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .build(); + ApiFuture future = + hiveMetastoreServiceClient.getHiveTableCallable().futureCall(request); + // Do something. 
+ HiveTable response = future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveTable_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/SyncGetHiveTable.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/SyncGetHiveTable.java new file mode 100644 index 000000000000..d4c7f39c4801 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/SyncGetHiveTable.java @@ -0,0 +1,47 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveTable_sync] +import com.google.cloud.biglake.hive.v1beta.GetHiveTableRequest; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.TableName; + +public class SyncGetHiveTable { + + public static void main(String[] args) throws Exception { + syncGetHiveTable(); + } + + public static void syncGetHiveTable() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + GetHiveTableRequest request = + GetHiveTableRequest.newBuilder() + .setName(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .build(); + HiveTable response = hiveMetastoreServiceClient.getHiveTable(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveTable_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/SyncGetHiveTableString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/SyncGetHiveTableString.java new file mode 100644 index 000000000000..0c688a3736d0 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/SyncGetHiveTableString.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveTable_String_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.TableName; + +public class SyncGetHiveTableString { + + public static void main(String[] args) throws Exception { + syncGetHiveTableString(); + } + + public static void syncGetHiveTableString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString(); + HiveTable response = hiveMetastoreServiceClient.getHiveTable(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveTable_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/SyncGetHiveTableTablename.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/SyncGetHiveTableTablename.java new file mode 100644 index 000000000000..256297e75fab --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/gethivetable/SyncGetHiveTableTablename.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_GetHiveTable_Tablename_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.TableName; + +public class SyncGetHiveTableTablename { + + public static void main(String[] args) throws Exception { + syncGetHiveTableTablename(); + } + + public static void syncGetHiveTableTablename() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + TableName name = TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]"); + HiveTable response = hiveMetastoreServiceClient.getHiveTable(name); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_GetHiveTable_Tablename_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/AsyncListHiveCatalogs.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/AsyncListHiveCatalogs.java new file mode 100644 index 000000000000..9ad68812cee8 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/AsyncListHiveCatalogs.java @@ -0,0 +1,55 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveCatalogs_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest; +import com.google.cloud.biglake.hive.v1beta.ProjectName; + +public class AsyncListHiveCatalogs { + + public static void main(String[] args) throws Exception { + asyncListHiveCatalogs(); + } + + public static void asyncListHiveCatalogs() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ListHiveCatalogsRequest request = + ListHiveCatalogsRequest.newBuilder() + .setParent(ProjectName.of("[PROJECT]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + ApiFuture future = + hiveMetastoreServiceClient.listHiveCatalogsPagedCallable().futureCall(request); + // Do something. 
+ for (HiveCatalog element : future.get().iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveCatalogs_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/AsyncListHiveCatalogsPaged.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/AsyncListHiveCatalogsPaged.java new file mode 100644 index 000000000000..078ef59ad00d --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/AsyncListHiveCatalogsPaged.java @@ -0,0 +1,63 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveCatalogs_Paged_async] +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsResponse; +import com.google.cloud.biglake.hive.v1beta.ProjectName; +import com.google.common.base.Strings; + +public class AsyncListHiveCatalogsPaged { + + public static void main(String[] args) throws Exception { + asyncListHiveCatalogsPaged(); + } + + public static void asyncListHiveCatalogsPaged() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ListHiveCatalogsRequest request = + ListHiveCatalogsRequest.newBuilder() + .setParent(ProjectName.of("[PROJECT]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + while (true) { + ListHiveCatalogsResponse response = + hiveMetastoreServiceClient.listHiveCatalogsCallable().call(request); + for (HiveCatalog element : response.getCatalogsList()) { + // doThingsWith(element); + } + String nextPageToken = response.getNextPageToken(); + if (!Strings.isNullOrEmpty(nextPageToken)) { + request = request.toBuilder().setPageToken(nextPageToken).build(); + } else { + break; + } + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveCatalogs_Paged_async] diff --git 
a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/SyncListHiveCatalogs.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/SyncListHiveCatalogs.java new file mode 100644 index 000000000000..ef2eafcbc07b --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/SyncListHiveCatalogs.java @@ -0,0 +1,52 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveCatalogs_sync] +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ListHiveCatalogsRequest; +import com.google.cloud.biglake.hive.v1beta.ProjectName; + +public class SyncListHiveCatalogs { + + public static void main(String[] args) throws Exception { + syncListHiveCatalogs(); + } + + public static void syncListHiveCatalogs() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ListHiveCatalogsRequest request = + ListHiveCatalogsRequest.newBuilder() + .setParent(ProjectName.of("[PROJECT]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + for (HiveCatalog element : + hiveMetastoreServiceClient.listHiveCatalogs(request).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveCatalogs_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/SyncListHiveCatalogsProjectname.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/SyncListHiveCatalogsProjectname.java new file mode 100644 index 000000000000..a381bf08393a --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/SyncListHiveCatalogsProjectname.java @@ -0,0 +1,45 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveCatalogs_Projectname_sync] +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ProjectName; + +public class SyncListHiveCatalogsProjectname { + + public static void main(String[] args) throws Exception { + syncListHiveCatalogsProjectname(); + } + + public static void syncListHiveCatalogsProjectname() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ProjectName parent = ProjectName.of("[PROJECT]"); + for (HiveCatalog element : hiveMetastoreServiceClient.listHiveCatalogs(parent).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveCatalogs_Projectname_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/SyncListHiveCatalogsString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/SyncListHiveCatalogsString.java new file mode 100644 index 000000000000..275d4ba63451 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivecatalogs/SyncListHiveCatalogsString.java @@ -0,0 +1,45 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveCatalogs_String_sync] +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ProjectName; + +public class SyncListHiveCatalogsString { + + public static void main(String[] args) throws Exception { + syncListHiveCatalogsString(); + } + + public static void syncListHiveCatalogsString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String parent = ProjectName.of("[PROJECT]").toString(); + for (HiveCatalog element : hiveMetastoreServiceClient.listHiveCatalogs(parent).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveCatalogs_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/AsyncListHiveDatabases.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/AsyncListHiveDatabases.java new file mode 100644 index 000000000000..422b1473fd0f --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/AsyncListHiveDatabases.java @@ -0,0 +1,55 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveDatabases_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest; + +public class AsyncListHiveDatabases { + + public static void main(String[] args) throws Exception { + asyncListHiveDatabases(); + } + + public static void asyncListHiveDatabases() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ListHiveDatabasesRequest request = + ListHiveDatabasesRequest.newBuilder() + .setParent(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + ApiFuture future = + hiveMetastoreServiceClient.listHiveDatabasesPagedCallable().futureCall(request); + // Do something. 
+ for (HiveDatabase element : future.get().iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveDatabases_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/AsyncListHiveDatabasesPaged.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/AsyncListHiveDatabasesPaged.java new file mode 100644 index 000000000000..a4a2aadb06af --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/AsyncListHiveDatabasesPaged.java @@ -0,0 +1,63 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveDatabases_Paged_async] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesResponse; +import com.google.common.base.Strings; + +public class AsyncListHiveDatabasesPaged { + + public static void main(String[] args) throws Exception { + asyncListHiveDatabasesPaged(); + } + + public static void asyncListHiveDatabasesPaged() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ListHiveDatabasesRequest request = + ListHiveDatabasesRequest.newBuilder() + .setParent(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + while (true) { + ListHiveDatabasesResponse response = + hiveMetastoreServiceClient.listHiveDatabasesCallable().call(request); + for (HiveDatabase element : response.getDatabasesList()) { + // doThingsWith(element); + } + String nextPageToken = response.getNextPageToken(); + if (!Strings.isNullOrEmpty(nextPageToken)) { + request = request.toBuilder().setPageToken(nextPageToken).build(); + } else { + break; + } + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveDatabases_Paged_async] diff --git 
a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/SyncListHiveDatabases.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/SyncListHiveDatabases.java new file mode 100644 index 000000000000..38fc883784da --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/SyncListHiveDatabases.java @@ -0,0 +1,52 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveDatabases_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ListHiveDatabasesRequest; + +public class SyncListHiveDatabases { + + public static void main(String[] args) throws Exception { + syncListHiveDatabases(); + } + + public static void syncListHiveDatabases() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ListHiveDatabasesRequest request = + ListHiveDatabasesRequest.newBuilder() + .setParent(CatalogName.of("[PROJECT]", "[CATALOG]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + for (HiveDatabase element : + hiveMetastoreServiceClient.listHiveDatabases(request).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveDatabases_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/SyncListHiveDatabasesCatalogname.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/SyncListHiveDatabasesCatalogname.java new file mode 100644 index 000000000000..cc6f1e92157e --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/SyncListHiveDatabasesCatalogname.java @@ -0,0 +1,46 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveDatabases_Catalogname_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; + +public class SyncListHiveDatabasesCatalogname { + + public static void main(String[] args) throws Exception { + syncListHiveDatabasesCatalogname(); + } + + public static void syncListHiveDatabasesCatalogname() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + CatalogName parent = CatalogName.of("[PROJECT]", "[CATALOG]"); + for (HiveDatabase element : + hiveMetastoreServiceClient.listHiveDatabases(parent).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveDatabases_Catalogname_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/SyncListHiveDatabasesString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/SyncListHiveDatabasesString.java new file mode 100644 index 000000000000..2ef333651256 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivedatabases/SyncListHiveDatabasesString.java @@ -0,0 +1,46 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the 
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveDatabases_String_sync] +import com.google.cloud.biglake.hive.v1beta.CatalogName; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; + +public class SyncListHiveDatabasesString { + + public static void main(String[] args) throws Exception { + syncListHiveDatabasesString(); + } + + public static void syncListHiveDatabasesString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String parent = CatalogName.of("[PROJECT]", "[CATALOG]").toString(); + for (HiveDatabase element : + hiveMetastoreServiceClient.listHiveDatabases(parent).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveDatabases_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/AsyncListHiveTables.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/AsyncListHiveTables.java new file mode 100644 index 000000000000..23eaf519d6cc --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/AsyncListHiveTables.java @@ -0,0 +1,55 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveTables_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class AsyncListHiveTables { + + public static void main(String[] args) throws Exception { + asyncListHiveTables(); + } + + public static void asyncListHiveTables() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ListHiveTablesRequest request = + ListHiveTablesRequest.newBuilder() + .setParent(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + ApiFuture future = + hiveMetastoreServiceClient.listHiveTablesPagedCallable().futureCall(request); + // Do something. 
+ for (HiveTable element : future.get().iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveTables_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/AsyncListHiveTablesPaged.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/AsyncListHiveTablesPaged.java new file mode 100644 index 000000000000..f7ed6772200d --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/AsyncListHiveTablesPaged.java @@ -0,0 +1,63 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveTables_Paged_async] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesResponse; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; +import com.google.common.base.Strings; + +public class AsyncListHiveTablesPaged { + + public static void main(String[] args) throws Exception { + asyncListHiveTablesPaged(); + } + + public static void asyncListHiveTablesPaged() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ListHiveTablesRequest request = + ListHiveTablesRequest.newBuilder() + .setParent(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + while (true) { + ListHiveTablesResponse response = + hiveMetastoreServiceClient.listHiveTablesCallable().call(request); + for (HiveTable element : response.getTablesList()) { + // doThingsWith(element); + } + String nextPageToken = response.getNextPageToken(); + if (!Strings.isNullOrEmpty(nextPageToken)) { + request = request.toBuilder().setPageToken(nextPageToken).build(); + } else { + break; + } + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveTables_Paged_async] diff --git 
a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/SyncListHiveTables.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/SyncListHiveTables.java new file mode 100644 index 000000000000..32f569e49d94 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/SyncListHiveTables.java @@ -0,0 +1,51 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveTables_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.ListHiveTablesRequest; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class SyncListHiveTables { + + public static void main(String[] args) throws Exception { + syncListHiveTables(); + } + + public static void syncListHiveTables() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ListHiveTablesRequest request = + ListHiveTablesRequest.newBuilder() + .setParent(NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + for (HiveTable element : hiveMetastoreServiceClient.listHiveTables(request).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveTables_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/SyncListHiveTablesNamespacename.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/SyncListHiveTablesNamespacename.java new file mode 100644 index 000000000000..202a568d2579 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/SyncListHiveTablesNamespacename.java @@ -0,0 +1,45 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveTables_Namespacename_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class SyncListHiveTablesNamespacename { + + public static void main(String[] args) throws Exception { + syncListHiveTablesNamespacename(); + } + + public static void syncListHiveTablesNamespacename() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + NamespaceName parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]"); + for (HiveTable element : hiveMetastoreServiceClient.listHiveTables(parent).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveTables_Namespacename_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/SyncListHiveTablesString.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/SyncListHiveTablesString.java new file mode 100644 index 000000000000..7c79501f578a --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listhivetables/SyncListHiveTablesString.java @@ -0,0 +1,45 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListHiveTables_String_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.NamespaceName; + +public class SyncListHiveTablesString { + + public static void main(String[] args) throws Exception { + syncListHiveTablesString(); + } + + public static void syncListHiveTablesString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + String parent = NamespaceName.of("[PROJECT]", "[CATALOG]", "[DATABASE]").toString(); + for (HiveTable element : hiveMetastoreServiceClient.listHiveTables(parent).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListHiveTables_String_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listpartitions/AsyncListPartitions.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listpartitions/AsyncListPartitions.java new file mode 100644 index 000000000000..4e4f24c387cd --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/listpartitions/AsyncListPartitions.java @@ -0,0 +1,53 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_ListPartitions_async] +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.ListPartitionsRequest; +import com.google.cloud.biglake.hive.v1beta.ListPartitionsResponse; +import com.google.cloud.biglake.hive.v1beta.TableName; + +public class AsyncListPartitions { + + public static void main(String[] args) throws Exception { + asyncListPartitions(); + } + + public static void asyncListPartitions() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + ListPartitionsRequest request = + ListPartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[CATALOG]", "[DATABASE]", "[TABLE]").toString()) + .setFilter("filter-1274492040") + .build(); + ServerStream stream = + hiveMetastoreServiceClient.listPartitionsCallable().call(request); + for (ListPartitionsResponse response : stream) { + // Do something when a response is received. 
+ } + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_ListPartitions_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivecatalog/AsyncUpdateHiveCatalog.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivecatalog/AsyncUpdateHiveCatalog.java new file mode 100644 index 000000000000..147f85569c02 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivecatalog/AsyncUpdateHiveCatalog.java @@ -0,0 +1,52 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_UpdateHiveCatalog_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest; +import com.google.protobuf.FieldMask; + +public class AsyncUpdateHiveCatalog { + + public static void main(String[] args) throws Exception { + asyncUpdateHiveCatalog(); + } + + public static void asyncUpdateHiveCatalog() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + UpdateHiveCatalogRequest request = + UpdateHiveCatalogRequest.newBuilder() + .setHiveCatalog(HiveCatalog.newBuilder().build()) + .setUpdateMask(FieldMask.newBuilder().build()) + .build(); + ApiFuture future = + hiveMetastoreServiceClient.updateHiveCatalogCallable().futureCall(request); + // Do something. + HiveCatalog response = future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_UpdateHiveCatalog_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivecatalog/SyncUpdateHiveCatalog.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivecatalog/SyncUpdateHiveCatalog.java new file mode 100644 index 000000000000..14027d4dbd2c --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivecatalog/SyncUpdateHiveCatalog.java @@ -0,0 +1,48 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_UpdateHiveCatalog_sync] +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveCatalogRequest; +import com.google.protobuf.FieldMask; + +public class SyncUpdateHiveCatalog { + + public static void main(String[] args) throws Exception { + syncUpdateHiveCatalog(); + } + + public static void syncUpdateHiveCatalog() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + UpdateHiveCatalogRequest request = + UpdateHiveCatalogRequest.newBuilder() + .setHiveCatalog(HiveCatalog.newBuilder().build()) + .setUpdateMask(FieldMask.newBuilder().build()) + .build(); + HiveCatalog response = hiveMetastoreServiceClient.updateHiveCatalog(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_UpdateHiveCatalog_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivecatalog/SyncUpdateHiveCatalogHivecatalogFieldmask.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivecatalog/SyncUpdateHiveCatalogHivecatalogFieldmask.java new file mode 100644 index 000000000000..c3208329807b --- /dev/null +++ 
b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivecatalog/SyncUpdateHiveCatalogHivecatalogFieldmask.java @@ -0,0 +1,44 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_UpdateHiveCatalog_HivecatalogFieldmask_sync] +import com.google.cloud.biglake.hive.v1beta.HiveCatalog; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.protobuf.FieldMask; + +public class SyncUpdateHiveCatalogHivecatalogFieldmask { + + public static void main(String[] args) throws Exception { + syncUpdateHiveCatalogHivecatalogFieldmask(); + } + + public static void syncUpdateHiveCatalogHivecatalogFieldmask() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + HiveCatalog hiveCatalog = HiveCatalog.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + HiveCatalog response = hiveMetastoreServiceClient.updateHiveCatalog(hiveCatalog, updateMask); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_UpdateHiveCatalog_HivecatalogFieldmask_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivedatabase/AsyncUpdateHiveDatabase.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivedatabase/AsyncUpdateHiveDatabase.java new file mode 100644 index 000000000000..181e2edc4f79 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivedatabase/AsyncUpdateHiveDatabase.java @@ -0,0 +1,52 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_UpdateHiveDatabase_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest; +import com.google.protobuf.FieldMask; + +public class AsyncUpdateHiveDatabase { + + public static void main(String[] args) throws Exception { + asyncUpdateHiveDatabase(); + } + + public static void asyncUpdateHiveDatabase() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + UpdateHiveDatabaseRequest request = + UpdateHiveDatabaseRequest.newBuilder() + .setHiveDatabase(HiveDatabase.newBuilder().build()) + .setUpdateMask(FieldMask.newBuilder().build()) + .build(); + ApiFuture future = + hiveMetastoreServiceClient.updateHiveDatabaseCallable().futureCall(request); + // Do something. 
+ HiveDatabase response = future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_UpdateHiveDatabase_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivedatabase/SyncUpdateHiveDatabase.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivedatabase/SyncUpdateHiveDatabase.java new file mode 100644 index 000000000000..8aa4975f4822 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivedatabase/SyncUpdateHiveDatabase.java @@ -0,0 +1,48 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_UpdateHiveDatabase_sync] +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveDatabaseRequest; +import com.google.protobuf.FieldMask; + +public class SyncUpdateHiveDatabase { + + public static void main(String[] args) throws Exception { + syncUpdateHiveDatabase(); + } + + public static void syncUpdateHiveDatabase() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + UpdateHiveDatabaseRequest request = + UpdateHiveDatabaseRequest.newBuilder() + .setHiveDatabase(HiveDatabase.newBuilder().build()) + .setUpdateMask(FieldMask.newBuilder().build()) + .build(); + HiveDatabase response = hiveMetastoreServiceClient.updateHiveDatabase(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_UpdateHiveDatabase_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivedatabase/SyncUpdateHiveDatabaseHivedatabaseFieldmask.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivedatabase/SyncUpdateHiveDatabaseHivedatabaseFieldmask.java new file mode 100644 index 000000000000..1e4ea6bf1fd5 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivedatabase/SyncUpdateHiveDatabaseHivedatabaseFieldmask.java @@ -0,0 +1,45 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_UpdateHiveDatabase_HivedatabaseFieldmask_sync] +import com.google.cloud.biglake.hive.v1beta.HiveDatabase; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.protobuf.FieldMask; + +public class SyncUpdateHiveDatabaseHivedatabaseFieldmask { + + public static void main(String[] args) throws Exception { + syncUpdateHiveDatabaseHivedatabaseFieldmask(); + } + + public static void syncUpdateHiveDatabaseHivedatabaseFieldmask() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + HiveDatabase hiveDatabase = HiveDatabase.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + HiveDatabase response = + hiveMetastoreServiceClient.updateHiveDatabase(hiveDatabase, updateMask); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_UpdateHiveDatabase_HivedatabaseFieldmask_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivetable/AsyncUpdateHiveTable.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivetable/AsyncUpdateHiveTable.java new file mode 100644 index 000000000000..6c1a29fe757e --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivetable/AsyncUpdateHiveTable.java @@ -0,0 +1,52 @@ +/* + * Copyright 2026 
Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_UpdateHiveTable_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest; +import com.google.protobuf.FieldMask; + +public class AsyncUpdateHiveTable { + + public static void main(String[] args) throws Exception { + asyncUpdateHiveTable(); + } + + public static void asyncUpdateHiveTable() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + UpdateHiveTableRequest request = + UpdateHiveTableRequest.newBuilder() + .setHiveTable(HiveTable.newBuilder().build()) + .setUpdateMask(FieldMask.newBuilder().build()) + .build(); + ApiFuture future = + hiveMetastoreServiceClient.updateHiveTableCallable().futureCall(request); + // Do something. 
+ HiveTable response = future.get(); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_UpdateHiveTable_async] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivetable/SyncUpdateHiveTable.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivetable/SyncUpdateHiveTable.java new file mode 100644 index 000000000000..ddcb996b641b --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivetable/SyncUpdateHiveTable.java @@ -0,0 +1,48 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_UpdateHiveTable_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.cloud.biglake.hive.v1beta.UpdateHiveTableRequest; +import com.google.protobuf.FieldMask; + +public class SyncUpdateHiveTable { + + public static void main(String[] args) throws Exception { + syncUpdateHiveTable(); + } + + public static void syncUpdateHiveTable() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + UpdateHiveTableRequest request = + UpdateHiveTableRequest.newBuilder() + .setHiveTable(HiveTable.newBuilder().build()) + .setUpdateMask(FieldMask.newBuilder().build()) + .build(); + HiveTable response = hiveMetastoreServiceClient.updateHiveTable(request); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_UpdateHiveTable_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivetable/SyncUpdateHiveTableHivetableFieldmask.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivetable/SyncUpdateHiveTableHivetableFieldmask.java new file mode 100644 index 000000000000..a958d4d8ae92 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservice/updatehivetable/SyncUpdateHiveTableHivetableFieldmask.java @@ -0,0 +1,44 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreService_UpdateHiveTable_HivetableFieldmask_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceClient; +import com.google.cloud.biglake.hive.v1beta.HiveTable; +import com.google.protobuf.FieldMask; + +public class SyncUpdateHiveTableHivetableFieldmask { + + public static void main(String[] args) throws Exception { + syncUpdateHiveTableHivetableFieldmask(); + } + + public static void syncUpdateHiveTableHivetableFieldmask() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (HiveMetastoreServiceClient hiveMetastoreServiceClient = + HiveMetastoreServiceClient.create()) { + HiveTable hiveTable = HiveTable.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + HiveTable response = hiveMetastoreServiceClient.updateHiveTable(hiveTable, updateMask); + } + } +} +// [END biglake_v1beta_generated_HiveMetastoreService_UpdateHiveTable_HivetableFieldmask_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservicesettings/createhivecatalog/SyncCreateHiveCatalog.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservicesettings/createhivecatalog/SyncCreateHiveCatalog.java new file mode 100644 index 000000000000..ad59a92932ce --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/hivemetastoreservicesettings/createhivecatalog/SyncCreateHiveCatalog.java @@ -0,0 +1,57 @@ +/* + * Copyright 2026 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.samples; + +// [START biglake_v1beta_generated_HiveMetastoreServiceSettings_CreateHiveCatalog_sync] +import com.google.cloud.biglake.hive.v1beta.HiveMetastoreServiceSettings; +import java.time.Duration; + +public class SyncCreateHiveCatalog { + + public static void main(String[] args) throws Exception { + syncCreateHiveCatalog(); + } + + public static void syncCreateHiveCatalog() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + HiveMetastoreServiceSettings.Builder hiveMetastoreServiceSettingsBuilder = + HiveMetastoreServiceSettings.newBuilder(); + hiveMetastoreServiceSettingsBuilder + .createHiveCatalogSettings() + .setRetrySettings( + hiveMetastoreServiceSettingsBuilder + .createHiveCatalogSettings() + .getRetrySettings() + .toBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(1)) + .setInitialRpcTimeoutDuration(Duration.ofSeconds(5)) + .setMaxAttempts(5) + .setMaxRetryDelayDuration(Duration.ofSeconds(30)) + .setMaxRpcTimeoutDuration(Duration.ofSeconds(60)) + .setRetryDelayMultiplier(1.3) + .setRpcTimeoutMultiplier(1.5) + .setTotalTimeoutDuration(Duration.ofSeconds(300)) + .build()); + HiveMetastoreServiceSettings hiveMetastoreServiceSettings = + hiveMetastoreServiceSettingsBuilder.build(); + } +} +// [END biglake_v1beta_generated_HiveMetastoreServiceSettings_CreateHiveCatalog_sync] diff --git a/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/stub/hivemetastoreservicestubsettings/createhivecatalog/SyncCreateHiveCatalog.java b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/stub/hivemetastoreservicestubsettings/createhivecatalog/SyncCreateHiveCatalog.java new file mode 100644 index 000000000000..8b592b029373 --- /dev/null +++ b/java-biglake/samples/snippets/generated/com/google/cloud/biglake/hive/v1beta/stub/hivemetastoreservicestubsettings/createhivecatalog/SyncCreateHiveCatalog.java @@ -0,0 +1,57 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.biglake.hive.v1beta.stub.samples; + +// [START biglake_v1beta_generated_HiveMetastoreServiceStubSettings_CreateHiveCatalog_sync] +import com.google.cloud.biglake.hive.v1beta.stub.HiveMetastoreServiceStubSettings; +import java.time.Duration; + +public class SyncCreateHiveCatalog { + + public static void main(String[] args) throws Exception { + syncCreateHiveCatalog(); + } + + public static void syncCreateHiveCatalog() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + HiveMetastoreServiceStubSettings.Builder hiveMetastoreServiceSettingsBuilder = + HiveMetastoreServiceStubSettings.newBuilder(); + hiveMetastoreServiceSettingsBuilder + .createHiveCatalogSettings() + .setRetrySettings( + hiveMetastoreServiceSettingsBuilder + .createHiveCatalogSettings() + .getRetrySettings() + .toBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(1)) + .setInitialRpcTimeoutDuration(Duration.ofSeconds(5)) + .setMaxAttempts(5) + .setMaxRetryDelayDuration(Duration.ofSeconds(30)) + .setMaxRpcTimeoutDuration(Duration.ofSeconds(60)) + .setRetryDelayMultiplier(1.3) + .setRpcTimeoutMultiplier(1.5) + .setTotalTimeoutDuration(Duration.ofSeconds(300)) + .build()); + HiveMetastoreServiceStubSettings hiveMetastoreServiceSettings = + hiveMetastoreServiceSettingsBuilder.build(); + } +} +// [END biglake_v1beta_generated_HiveMetastoreServiceStubSettings_CreateHiveCatalog_sync] diff --git a/versions.txt b/versions.txt index af9a5462d534..efdeaca22550 100644 --- a/versions.txt +++ b/versions.txt @@ -980,3 +980,5 @@ proto-google-cloud-networkconnectivity-v1beta:1.86.0:1.87.0-SNAPSHOT grpc-google-cloud-networkconnectivity-v1beta:1.86.0:1.87.0-SNAPSHOT proto-google-cloud-hypercomputecluster-v1:0.7.0:0.8.0-SNAPSHOT grpc-google-cloud-hypercomputecluster-v1:0.7.0:0.8.0-SNAPSHOT +proto-google-cloud-biglake-v1beta:0.75.0:0.76.0-SNAPSHOT +grpc-google-cloud-biglake-v1beta:0.75.0:0.76.0-SNAPSHOT