diff --git a/.librarian/state.yaml b/.librarian/state.yaml index 516a86aa6ed2..20949c0ffe2e 100644 --- a/.librarian/state.yaml +++ b/.librarian/state.yaml @@ -1970,10 +1970,12 @@ libraries: tag_format: '{id}-v{version}' - id: google-cloud-hypercomputecluster version: 0.2.0 - last_generated_commit: c662840a94dbdf708caa44893a2d49119cdd391c + last_generated_commit: 23ec7b6ee94a2fe364cdb5a212949f1cf37f85ad apis: - path: google/cloud/hypercomputecluster/v1beta service_config: hypercomputecluster_v1beta.yaml + - path: google/cloud/hypercomputecluster/v1 + service_config: hypercomputecluster_v1.yaml source_roots: - packages/google-cloud-hypercomputecluster preserve_regex: diff --git a/packages/google-cloud-hypercomputecluster/README.rst b/packages/google-cloud-hypercomputecluster/README.rst index 4e924fb07607..18f4dba828f9 100644 --- a/packages/google-cloud-hypercomputecluster/README.rst +++ b/packages/google-cloud-hypercomputecluster/README.rst @@ -3,7 +3,7 @@ Python Client for Cluster Director API |preview| |pypi| |versions| -`Cluster Director API`_: +`Cluster Director API`_: The Cluster Director API allows you to deploy, manage, and monitor clusters that run AI, ML, or HPC workloads. - `Client Library Documentation`_ - `Product Documentation`_ diff --git a/packages/google-cloud-hypercomputecluster/docs/README.rst b/packages/google-cloud-hypercomputecluster/docs/README.rst index 4e924fb07607..18f4dba828f9 100644 --- a/packages/google-cloud-hypercomputecluster/docs/README.rst +++ b/packages/google-cloud-hypercomputecluster/docs/README.rst @@ -3,7 +3,7 @@ Python Client for Cluster Director API |preview| |pypi| |versions| -`Cluster Director API`_: +`Cluster Director API`_: The Cluster Director API allows you to deploy, manage, and monitor clusters that run AI, ML, or HPC workloads. - `Client Library Documentation`_ - `Product Documentation`_ diff --git a/packages/google-cloud-hypercomputecluster/docs/hypercomputecluster_v1/hypercompute_cluster.rst b/packages/google-cloud-hypercomputecluster/docs/hypercomputecluster_v1/hypercompute_cluster.rst new file mode 100644 index 000000000000..109723503e55 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/docs/hypercomputecluster_v1/hypercompute_cluster.rst @@ -0,0 +1,10 @@ +HypercomputeCluster +------------------------------------- + +.. automodule:: google.cloud.hypercomputecluster_v1.services.hypercompute_cluster + :members: + :inherited-members: + +.. automodule:: google.cloud.hypercomputecluster_v1.services.hypercompute_cluster.pagers + :members: + :inherited-members: diff --git a/packages/google-cloud-hypercomputecluster/docs/hypercomputecluster_v1/services_.rst b/packages/google-cloud-hypercomputecluster/docs/hypercomputecluster_v1/services_.rst new file mode 100644 index 000000000000..c3eb0915998c --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/docs/hypercomputecluster_v1/services_.rst @@ -0,0 +1,6 @@ +Services for Google Cloud Hypercomputecluster v1 API +==================================================== +.. toctree:: + :maxdepth: 2 + + hypercompute_cluster diff --git a/packages/google-cloud-hypercomputecluster/docs/hypercomputecluster_v1/types_.rst b/packages/google-cloud-hypercomputecluster/docs/hypercomputecluster_v1/types_.rst new file mode 100644 index 000000000000..29b31683ba58 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/docs/hypercomputecluster_v1/types_.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Hypercomputecluster v1 API +================================================= + +.. 
automodule:: google.cloud.hypercomputecluster_v1.types + :members: + :show-inheritance: diff --git a/packages/google-cloud-hypercomputecluster/docs/index.rst b/packages/google-cloud-hypercomputecluster/docs/index.rst index a8d839406987..1fb5ab986a78 100644 --- a/packages/google-cloud-hypercomputecluster/docs/index.rst +++ b/packages/google-cloud-hypercomputecluster/docs/index.rst @@ -2,6 +2,17 @@ .. include:: multiprocessing.rst +This package includes clients for multiple versions of Cluster Director API. +By default, you will get version ``hypercomputecluster_v1``. + + +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + hypercomputecluster_v1/services_ + hypercomputecluster_v1/types_ API Reference ------------- diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster/__init__.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster/__init__.py index 5da3b717da60..647919d1245a 100644 --- a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster/__init__.py +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster/__init__.py @@ -18,13 +18,13 @@ __version__ = package_version.__version__ -from google.cloud.hypercomputecluster_v1beta.services.hypercompute_cluster.async_client import ( +from google.cloud.hypercomputecluster_v1.services.hypercompute_cluster.async_client import ( HypercomputeClusterAsyncClient, ) -from google.cloud.hypercomputecluster_v1beta.services.hypercompute_cluster.client import ( +from google.cloud.hypercomputecluster_v1.services.hypercompute_cluster.client import ( HypercomputeClusterClient, ) -from google.cloud.hypercomputecluster_v1beta.types.hypercompute_cluster import ( +from google.cloud.hypercomputecluster_v1.types.hypercompute_cluster import ( BootDisk, BucketReference, Cluster, @@ -67,8 +67,33 @@ StorageResourceConfig, UpdateClusterRequest, ) -from google.cloud.hypercomputecluster_v1beta.types.operation_metadata import ( +from google.cloud.hypercomputecluster_v1.types.operation_metadata import ( + CheckClusterHealth, + CreateFilestoreInstance, + CreateLoginNode, + CreateLustreInstance, + CreateNetwork, + CreateNodeset, + CreateOrchestrator, + CreatePartition, + CreatePrivateServiceAccess, + CreateStorageBucket, + DeleteFilestoreInstance, + DeleteLoginNode, + DeleteLustreInstance, + DeleteNetwork, + DeleteNodeset, + DeleteOrchestrator, + DeletePartition, + DeletePrivateServiceAccess, + DeleteStorageBucket, OperationMetadata, + OperationProgress, + OperationStep, + UpdateLoginNode, + UpdateNodeset, + UpdateOrchestrator, + UpdatePartition, ) __all__ = ( @@ -115,5 +140,30 @@ "StorageResource", "StorageResourceConfig", "UpdateClusterRequest", + "CheckClusterHealth", + "CreateFilestoreInstance", + "CreateLoginNode", + "CreateLustreInstance", + "CreateNetwork", + "CreateNodeset", + "CreateOrchestrator", + "CreatePartition", + "CreatePrivateServiceAccess", + "CreateStorageBucket", + "DeleteFilestoreInstance", + "DeleteLoginNode", + "DeleteLustreInstance", + "DeleteNetwork", + "DeleteNodeset", + "DeleteOrchestrator", + "DeletePartition", + "DeletePrivateServiceAccess", + "DeleteStorageBucket", "OperationMetadata", + "OperationProgress", + "OperationStep", + "UpdateLoginNode", + "UpdateNodeset", + "UpdateOrchestrator", + "UpdatePartition", ) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/__init__.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/__init__.py new file mode 100644 index 
000000000000..220fdd595f06 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/__init__.py @@ -0,0 +1,272 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import sys + +import google.api_core as api_core + +from google.cloud.hypercomputecluster_v1 import gapic_version as package_version + +__version__ = package_version.__version__ + +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + + +from .services.hypercompute_cluster import ( + HypercomputeClusterAsyncClient, + HypercomputeClusterClient, +) +from .types.hypercompute_cluster import ( + BootDisk, + BucketReference, + Cluster, + ComputeInstance, + ComputeInstanceSlurmNodeSet, + ComputeResource, + ComputeResourceConfig, + CreateClusterRequest, + DeleteClusterRequest, + ExistingBucketConfig, + ExistingFilestoreConfig, + ExistingLustreConfig, + ExistingNetworkConfig, + FileShareConfig, + FilestoreReference, + GcsAutoclassConfig, + GcsHierarchicalNamespaceConfig, + GetClusterRequest, + ListClustersRequest, + ListClustersResponse, + LustreReference, + NetworkReference, + NetworkResource, + NetworkResourceConfig, + NewBucketConfig, + NewFilestoreConfig, + NewFlexStartInstancesConfig, + NewLustreConfig, + NewNetworkConfig, + NewOnDemandInstancesConfig, + NewReservedInstancesConfig, + NewSpotInstancesConfig, + Orchestrator, + SlurmLoginNodes, + SlurmNodeSet, + SlurmOrchestrator, + SlurmPartition, + StorageConfig, + StorageResource, + StorageResourceConfig, + UpdateClusterRequest, +) +from .types.operation_metadata import ( + CheckClusterHealth, + CreateFilestoreInstance, + CreateLoginNode, + CreateLustreInstance, + CreateNetwork, + CreateNodeset, + CreateOrchestrator, + CreatePartition, + CreatePrivateServiceAccess, + CreateStorageBucket, + DeleteFilestoreInstance, + DeleteLoginNode, + DeleteLustreInstance, + DeleteNetwork, + DeleteNodeset, + DeleteOrchestrator, + DeletePartition, + DeletePrivateServiceAccess, + DeleteStorageBucket, + OperationMetadata, + OperationProgress, + OperationStep, + UpdateLoginNode, + UpdateNodeset, + UpdateOrchestrator, + UpdatePartition, +) + +if hasattr(api_core, "check_python_version") and hasattr( + api_core, "check_dependency_versions" +): # pragma: NO COVER + api_core.check_python_version("google.cloud.hypercomputecluster_v1") # type: ignore + api_core.check_dependency_versions("google.cloud.hypercomputecluster_v1") # type: ignore +else: # pragma: NO COVER + # An older version of api_core is installed which does not define the + # functions above. We do equivalent checks manually. 
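+ # The fallback below mirrors those helpers: it emits a FutureWarning when the + # running Python version is already unsupported (< 3.9) or nearing its support + # cutoff (3.9), and another when the installed protobuf release is older than + # the next required minimum (4.25.8).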
+ try: + import sys + import warnings + + _py_version_str = sys.version.split()[0] + _package_label = "google.cloud.hypercomputecluster_v1" + if sys.version_info < (3, 9): + warnings.warn( + "You are using a non-supported Python version " + + f"({_py_version_str}). Google will not post any further " + + f"updates to {_package_label} supporting this Python version. " + + "Please upgrade to the latest Python version, or at " + + f"least to Python 3.9, and then update {_package_label}.", + FutureWarning, + ) + if sys.version_info[:2] == (3, 9): + warnings.warn( + f"You are using a Python version ({_py_version_str}) " + + f"which Google will stop supporting in {_package_label} in " + + "January 2026. Please " + + "upgrade to the latest Python version, or at " + + "least to Python 3.10, before then, and " + + f"then update {_package_label}.", + FutureWarning, + ) + + def parse_version_to_tuple(version_string: str): + """Safely converts a semantic version string to a comparable tuple of integers. + Example: "4.25.8" -> (4, 25, 8) + Ignores non-numeric parts and handles common version formats. + Args: + version_string: Version string in the format "x.y.z" + Returns: + Tuple of integers for the parsed version string. + """ + parts = [] + for part in version_string.split("."): + try: + parts.append(int(part)) + except ValueError: + # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here. + # This is a simplification compared to 'packaging.parse_version', but sufficient + # for comparing strictly numeric semantic versions. + break + return tuple(parts) + + def _get_version(dependency_name): + try: + version_string: str = metadata.version(dependency_name) + parsed_version = parse_version_to_tuple(version_string) + return (parsed_version, version_string) + except Exception: + # Catch exceptions from metadata.version() (e.g., PackageNotFoundError) + # or errors during parse_version_to_tuple + return (None, "--") + + _dependency_package = "google.protobuf" + _next_supported_version = "4.25.8" + _next_supported_version_tuple = (4, 25, 8) + _recommendation = " (we recommend 6.x)" + (_version_used, _version_used_string) = _get_version(_dependency_package) + if _version_used and _version_used < _next_supported_version_tuple: + warnings.warn( + f"Package {_package_label} depends on " + + f"{_dependency_package}, currently installed at version " + + f"{_version_used_string}. Future updates to " + + f"{_package_label} will require {_dependency_package} at " + + f"version {_next_supported_version} or higher{_recommendation}." + + " Please ensure " + + "that either (a) your Python environment doesn't pin the " + + f"version of {_dependency_package}, so that updates to " + + f"{_package_label} can require the higher version, or " + + "(b) you manually update your Python environment to use at " + + f"least version {_next_supported_version} of " + + f"{_dependency_package}.", + FutureWarning, + ) + except Exception: + warnings.warn( + "Could not determine the version of Python " + + "currently being used. 
To continue receiving " + + f"updates for {_package_label}, ensure you are " + + "using a supported version of Python; see " + + "https://devguide.python.org/versions/" + ) + +__all__ = ( + "HypercomputeClusterAsyncClient", + "BootDisk", + "BucketReference", + "CheckClusterHealth", + "Cluster", + "ComputeInstance", + "ComputeInstanceSlurmNodeSet", + "ComputeResource", + "ComputeResourceConfig", + "CreateClusterRequest", + "CreateFilestoreInstance", + "CreateLoginNode", + "CreateLustreInstance", + "CreateNetwork", + "CreateNodeset", + "CreateOrchestrator", + "CreatePartition", + "CreatePrivateServiceAccess", + "CreateStorageBucket", + "DeleteClusterRequest", + "DeleteFilestoreInstance", + "DeleteLoginNode", + "DeleteLustreInstance", + "DeleteNetwork", + "DeleteNodeset", + "DeleteOrchestrator", + "DeletePartition", + "DeletePrivateServiceAccess", + "DeleteStorageBucket", + "ExistingBucketConfig", + "ExistingFilestoreConfig", + "ExistingLustreConfig", + "ExistingNetworkConfig", + "FileShareConfig", + "FilestoreReference", + "GcsAutoclassConfig", + "GcsHierarchicalNamespaceConfig", + "GetClusterRequest", + "HypercomputeClusterClient", + "ListClustersRequest", + "ListClustersResponse", + "LustreReference", + "NetworkReference", + "NetworkResource", + "NetworkResourceConfig", + "NewBucketConfig", + "NewFilestoreConfig", + "NewFlexStartInstancesConfig", + "NewLustreConfig", + "NewNetworkConfig", + "NewOnDemandInstancesConfig", + "NewReservedInstancesConfig", + "NewSpotInstancesConfig", + "OperationMetadata", + "OperationProgress", + "OperationStep", + "Orchestrator", + "SlurmLoginNodes", + "SlurmNodeSet", + "SlurmOrchestrator", + "SlurmPartition", + "StorageConfig", + "StorageResource", + "StorageResourceConfig", + "UpdateClusterRequest", + "UpdateLoginNode", + "UpdateNodeset", + "UpdateOrchestrator", + "UpdatePartition", +) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/gapic_metadata.json b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/gapic_metadata.json new file mode 100644 index 000000000000..10a2ca36c00c --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/gapic_metadata.json @@ -0,0 +1,103 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.hypercomputecluster_v1", + "protoPackage": "google.cloud.hypercomputecluster.v1", + "schema": "1.0", + "services": { + "HypercomputeCluster": { + "clients": { + "grpc": { + "libraryClient": "HypercomputeClusterClient", + "rpcs": { + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + } + } + }, + "grpc-async": { + "libraryClient": "HypercomputeClusterAsyncClient", + "rpcs": { + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + } + } + }, + "rest": { + "libraryClient": "HypercomputeClusterClient", + "rpcs": { + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" 
] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + } + } + } + } + } + } +} diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/gapic_version.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/gapic_version.py new file mode 100644 index 000000000000..b35a7c7cc0b3 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.2.0" # {x-release-please-version} diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/py.typed b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/py.typed new file mode 100644 index 000000000000..860d7baeabf3 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-hypercomputecluster package uses inline types. diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/__init__.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/__init__.py new file mode 100644 index 000000000000..cbf94b283c70 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/__init__.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/__init__.py new file mode 100644 index 000000000000..e82229774b5a --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .async_client import HypercomputeClusterAsyncClient +from .client import HypercomputeClusterClient + +__all__ = ( + "HypercomputeClusterClient", + "HypercomputeClusterAsyncClient", +) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/async_client.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/async_client.py new file mode 100644 index 000000000000..c98ca65f0a35 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/async_client.py @@ -0,0 +1,1369 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import logging as std_logging +import re +from collections import OrderedDict +from typing import ( + Callable, + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +import google.protobuf +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.hypercomputecluster_v1 import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +from google.cloud.hypercomputecluster_v1.services.hypercompute_cluster import pagers +from google.cloud.hypercomputecluster_v1.types import ( + hypercompute_cluster, + operation_metadata, +) + +from .client import HypercomputeClusterClient +from .transports.base import DEFAULT_CLIENT_INFO, HypercomputeClusterTransport +from .transports.grpc_asyncio import HypercomputeClusterGrpcAsyncIOTransport + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # 
pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class HypercomputeClusterAsyncClient: + """Service describing handlers for resources""" + + _client: HypercomputeClusterClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = HypercomputeClusterClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = HypercomputeClusterClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = HypercomputeClusterClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = HypercomputeClusterClient._DEFAULT_UNIVERSE + + bucket_path = staticmethod(HypercomputeClusterClient.bucket_path) + parse_bucket_path = staticmethod(HypercomputeClusterClient.parse_bucket_path) + cluster_path = staticmethod(HypercomputeClusterClient.cluster_path) + parse_cluster_path = staticmethod(HypercomputeClusterClient.parse_cluster_path) + compute_instance_path = staticmethod( + HypercomputeClusterClient.compute_instance_path + ) + parse_compute_instance_path = staticmethod( + HypercomputeClusterClient.parse_compute_instance_path + ) + disk_type_path = staticmethod(HypercomputeClusterClient.disk_type_path) + parse_disk_type_path = staticmethod(HypercomputeClusterClient.parse_disk_type_path) + file_instance_path = staticmethod(HypercomputeClusterClient.file_instance_path) + parse_file_instance_path = staticmethod( + HypercomputeClusterClient.parse_file_instance_path + ) + instance_path = staticmethod(HypercomputeClusterClient.instance_path) + parse_instance_path = staticmethod(HypercomputeClusterClient.parse_instance_path) + network_path = staticmethod(HypercomputeClusterClient.network_path) + parse_network_path = staticmethod(HypercomputeClusterClient.parse_network_path) + reservation_path = staticmethod(HypercomputeClusterClient.reservation_path) + parse_reservation_path = staticmethod( + HypercomputeClusterClient.parse_reservation_path + ) + subnetwork_path = staticmethod(HypercomputeClusterClient.subnetwork_path) + parse_subnetwork_path = staticmethod( + HypercomputeClusterClient.parse_subnetwork_path + ) + common_billing_account_path = staticmethod( + HypercomputeClusterClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + HypercomputeClusterClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(HypercomputeClusterClient.common_folder_path) + parse_common_folder_path = staticmethod( + HypercomputeClusterClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + HypercomputeClusterClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + HypercomputeClusterClient.parse_common_organization_path + ) + common_project_path = staticmethod(HypercomputeClusterClient.common_project_path) + parse_common_project_path = staticmethod( + HypercomputeClusterClient.parse_common_project_path + ) + common_location_path = staticmethod(HypercomputeClusterClient.common_location_path) + parse_common_location_path = staticmethod( + HypercomputeClusterClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. 
+ + Returns: + HypercomputeClusterAsyncClient: The constructed client. + """ + sa_info_func = ( + HypercomputeClusterClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(HypercomputeClusterAsyncClient, info, *args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + HypercomputeClusterAsyncClient: The constructed client. + """ + sa_file_func = ( + HypercomputeClusterClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(HypercomputeClusterAsyncClient, filename, *args, **kwargs) + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return HypercomputeClusterClient.get_mtls_endpoint_and_cert_source( + client_options + ) # type: ignore + + @property + def transport(self) -> HypercomputeClusterTransport: + """Returns the transport used by the client instance. + + Returns: + HypercomputeClusterTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. 
+ """ + return self._client._universe_domain + + get_transport_class = HypercomputeClusterClient.get_transport_class + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[ + str, + HypercomputeClusterTransport, + Callable[..., HypercomputeClusterTransport], + ] + ] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the hypercompute cluster async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Optional[Union[str,HypercomputeClusterTransport,Callable[..., HypercomputeClusterTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the HypercomputeClusterTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client = HypercomputeClusterClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient`.", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "credentialsType": None, + }, + ) + + async def list_clusters( + self, + request: Optional[Union[hypercompute_cluster.ListClustersRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListClustersAsyncPager: + r"""Lists Clusters in a given project and location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import hypercomputecluster_v1 + + async def sample_list_clusters(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterAsyncClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_clusters(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.hypercomputecluster_v1.types.ListClustersRequest, dict]]): + The request object. Request message for + [ListClusters][google.cloud.hypercomputecluster.v1.HypercomputeCluster.ListClusters]. + parent (:class:`str`): + Required. Parent location of the clusters to list, in + the format ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.hypercomputecluster_v1.services.hypercompute_cluster.pagers.ListClustersAsyncPager: + Response message for + [ListClusters][google.cloud.hypercomputecluster.v1.HypercomputeCluster.ListClusters]. + + Iterating over this object will yield results and + resolve additional pages automatically. 
+ + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, hypercompute_cluster.ListClustersRequest): + request = hypercompute_cluster.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_clusters + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListClustersAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_cluster( + self, + request: Optional[Union[hypercompute_cluster.GetClusterRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> hypercompute_cluster.Cluster: + r"""Gets details of a single Cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import hypercomputecluster_v1 + + async def sample_get_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterAsyncClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = await client.get_cluster(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.hypercomputecluster_v1.types.GetClusterRequest, dict]]): + The request object. Request message for + [GetCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.GetCluster]. + name (:class:`str`): + Required. Name of the cluster to retrieve, in the format + ``projects/{project}/locations/{location}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.hypercomputecluster_v1.types.Cluster: + A collection of virtual machines and + connected resources forming a + high-performance computing cluster + capable of running large-scale, tightly + coupled workloads. A cluster combines a + set of compute resources that perform + computations, storage resources that + contain inputs and store outputs, an + orchestrator that is responsible for + assigning jobs to compute resources, and + network resources that connect + everything together. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, hypercompute_cluster.GetClusterRequest): + request = hypercompute_cluster.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_cluster + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_cluster( + self, + request: Optional[ + Union[hypercompute_cluster.CreateClusterRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + cluster: Optional[hypercompute_cluster.Cluster] = None, + cluster_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Cluster in a given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import hypercomputecluster_v1 + + async def sample_create_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterAsyncClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.hypercomputecluster_v1.types.CreateClusterRequest, dict]]): + The request object. Request message for + [CreateCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.CreateCluster]. + parent (:class:`str`): + Required. Parent location in which the cluster should be + created, in the format + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`google.cloud.hypercomputecluster_v1.types.Cluster`): + Required. Cluster to create. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. ID of the cluster to create. Must conform to + `RFC-1034 `__ + (lower-case, alphanumeric, and at most 63 characters). + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.hypercomputecluster_v1.types.Cluster` A collection of virtual machines and connected resources forming a + high-performance computing cluster capable of running + large-scale, tightly coupled workloads. A cluster + combines a set of compute resources that perform + computations, storage resources that contain inputs + and store outputs, an orchestrator that is + responsible for assigning jobs to compute resources, + and network resources that connect everything + together. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, cluster, cluster_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
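+ # (proto-plus request constructors accept either an existing request message + # or a plain dict, which is copied field-by-field into a new message.)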
+ if not isinstance(request, hypercompute_cluster.CreateClusterRequest): + request = hypercompute_cluster.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if cluster is not None: + request.cluster = cluster + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_cluster + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + hypercompute_cluster.Cluster, + metadata_type=operation_metadata.OperationMetadata, + ) + + # Done; return the response. + return response + + async def update_cluster( + self, + request: Optional[ + Union[hypercompute_cluster.UpdateClusterRequest, dict] + ] = None, + *, + cluster: Optional[hypercompute_cluster.Cluster] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates the parameters of a single Cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import hypercomputecluster_v1 + + async def sample_update_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterAsyncClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.UpdateClusterRequest( + ) + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.hypercomputecluster_v1.types.UpdateClusterRequest, dict]]): + The request object. Request message for + [UpdateCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.UpdateCluster]. + cluster (:class:`google.cloud.hypercomputecluster_v1.types.Cluster`): + Required. Cluster to update. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. Mask specifying which + fields in the cluster to update. All + paths must be specified explicitly - + wildcards are not supported. At least + one path must be provided. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.hypercomputecluster_v1.types.Cluster` A collection of virtual machines and connected resources forming a + high-performance computing cluster capable of running + large-scale, tightly coupled workloads. A cluster + combines a set of compute resources that perform + computations, storage resources that contain inputs + and store outputs, an orchestrator that is + responsible for assigning jobs to compute resources, + and network resources that connect everything + together. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [cluster, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, hypercompute_cluster.UpdateClusterRequest): + request = hypercompute_cluster.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_cluster + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("cluster.name", request.cluster.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + hypercompute_cluster.Cluster, + metadata_type=operation_metadata.OperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_cluster( + self, + request: Optional[ + Union[hypercompute_cluster.DeleteClusterRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import hypercomputecluster_v1 + + async def sample_delete_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterAsyncClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.hypercomputecluster_v1.types.DeleteClusterRequest, dict]]): + The request object. Request message for + [DeleteCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.DeleteCluster]. + name (:class:`str`): + Required. Name of the cluster to delete, in the format + ``projects/{project}/locations/{location}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, hypercompute_cluster.DeleteClusterRequest): + request = hypercompute_cluster.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_cluster + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
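+ # (to_grpc_metadata encodes these pairs into the x-goog-request-params header, + # which the backend uses to route the request to the right resource.)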
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operation_metadata.OperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.list_operations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. 
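+
+                Note that this is the raw ``google.longrunning`` operation
+                proto rather than an ``operation_async.AsyncOperation``
+                future, so callers are responsible for polling it.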
+ """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.get_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.delete_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.cancel_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.get_location] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.list_locations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "HypercomputeClusterAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +__all__ = ("HypercomputeClusterAsyncClient",) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/client.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/client.py new file mode 100644 index 000000000000..769118df2ec3 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/client.py @@ -0,0 +1,1947 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import json +import logging as std_logging +import os +import re +import warnings +from collections import OrderedDict +from http import HTTPStatus +from typing import ( + Callable, + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +import google.protobuf +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.hypercomputecluster_v1 import gapic_version as package_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +from google.cloud.hypercomputecluster_v1.services.hypercompute_cluster import pagers +from google.cloud.hypercomputecluster_v1.types import ( + hypercompute_cluster, + operation_metadata, +) + +from .transports.base import DEFAULT_CLIENT_INFO, HypercomputeClusterTransport +from .transports.grpc import HypercomputeClusterGrpcTransport +from .transports.grpc_asyncio import HypercomputeClusterGrpcAsyncIOTransport +from .transports.rest import HypercomputeClusterRestTransport + + +class HypercomputeClusterClientMeta(type): + """Metaclass for the HypercomputeCluster client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[HypercomputeClusterTransport]] + _transport_registry["grpc"] = HypercomputeClusterGrpcTransport + _transport_registry["grpc_asyncio"] = HypercomputeClusterGrpcAsyncIOTransport + _transport_registry["rest"] = HypercomputeClusterRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[HypercomputeClusterTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
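+        # (The registry is an OrderedDict populated above with "grpc" first,
+        # so the synchronous gRPC transport is the effective default.)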
+        return next(iter(cls._transport_registry.values()))
+
+
+class HypercomputeClusterClient(metaclass=HypercomputeClusterClientMeta):
+    """Service describing handlers for resources"""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
+    DEFAULT_ENDPOINT = "hypercomputecluster.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    _DEFAULT_ENDPOINT_TEMPLATE = "hypercomputecluster.{UNIVERSE_DOMAIN}"
+    _DEFAULT_UNIVERSE = "googleapis.com"
+
+    @staticmethod
+    def _use_client_cert_effective():
+        """Returns whether client certificate should be used for mTLS if the
+        google-auth version supports should_use_client_cert automatic mTLS enablement.
+
+        Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var.
+
+        Returns:
+            bool: whether client certificate should be used for mTLS
+        Raises:
+            ValueError: (If using a version of google-auth without should_use_client_cert and
+                GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.)
+        """
+        # check if google-auth version supports should_use_client_cert for automatic mTLS enablement
+        if hasattr(mtls, "should_use_client_cert"):  # pragma: NO COVER
+            return mtls.should_use_client_cert()
+        else:  # pragma: NO COVER
+            # if unsupported, fallback to reading from env var
+            use_client_cert_str = os.getenv(
+                "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
+            ).lower()
+            if use_client_cert_str not in ("true", "false"):
+                raise ValueError(
+                    "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
+                    " either `true` or `false`"
+                )
+            return use_client_cert_str == "true"
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            HypercomputeClusterClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            HypercomputeClusterClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> HypercomputeClusterTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            HypercomputeClusterTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def bucket_path(
+        project: str,
+        bucket: str,
+    ) -> str:
+        """Returns a fully-qualified bucket string."""
+        return "projects/{project}/buckets/{bucket}".format(
+            project=project,
+            bucket=bucket,
+        )
+
+    @staticmethod
+    def parse_bucket_path(path: str) -> Dict[str, str]:
+        """Parses a bucket path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/buckets/(?P<bucket>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def cluster_path(
+        project: str,
+        location: str,
+        cluster: str,
+    ) -> str:
+        """Returns a fully-qualified cluster string."""
+        return "projects/{project}/locations/{location}/clusters/{cluster}".format(
+            project=project,
+            location=location,
+            cluster=cluster,
+        )
+
+    @staticmethod
+    def parse_cluster_path(path: str) -> Dict[str, str]:
+        """Parses a cluster path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/clusters/(?P<cluster>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def compute_instance_path(
+        project: str,
+        zone: str,
+        instance: str,
+    ) -> str:
+        """Returns a fully-qualified compute_instance string."""
+        return "projects/{project}/zones/{zone}/instances/{instance}".format(
+            project=project,
+            zone=zone,
+            instance=instance,
+        )
+
+    @staticmethod
+    def parse_compute_instance_path(path: str) -> Dict[str, str]:
+        """Parses a compute_instance path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/zones/(?P<zone>.+?)/instances/(?P<instance>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def disk_type_path(
+        project: str,
+        zone: str,
+        disk_type: str,
+    ) -> str:
+        """Returns a fully-qualified disk_type string."""
+        return "projects/{project}/zones/{zone}/diskTypes/{disk_type}".format(
+            project=project,
+            zone=zone,
+            disk_type=disk_type,
+        )
+
+    @staticmethod
+    def parse_disk_type_path(path: str) -> Dict[str, str]:
+        """Parses a disk_type path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/zones/(?P<zone>.+?)/diskTypes/(?P<disk_type>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def file_instance_path(
+        project: str,
+        location: str,
+        instance: str,
+    ) -> str:
+        """Returns a fully-qualified file_instance string."""
+        return "projects/{project}/locations/{location}/instances/{instance}".format(
+            project=project,
+            location=location,
+            instance=instance,
+        )
+
+    @staticmethod
+    def parse_file_instance_path(path: str) -> Dict[str, str]:
+        """Parses a file_instance path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/instances/(?P<instance>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def instance_path(
+        project: str,
+        location: str,
+        instance: str,
+    ) -> str:
+        """Returns a fully-qualified instance string."""
+        return "projects/{project}/locations/{location}/instances/{instance}".format(
+            project=project,
+            location=location,
+            instance=instance,
+        )
+
+    @staticmethod
+    def parse_instance_path(path: str) -> Dict[str, str]:
+        """Parses an instance path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/instances/(?P<instance>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def network_path(
+        project: str,
+        network: str,
+    ) -> str:
+        """Returns a fully-qualified network string."""
+        return "projects/{project}/global/networks/{network}".format(
+            project=project,
+            network=network,
+        )
+
+    @staticmethod
+    def parse_network_path(path: str) -> Dict[str, str]:
+        """Parses a network path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def reservation_path(
+        project: str,
+        zone: str,
+        reservation: str,
+    ) -> str:
+        """Returns a fully-qualified reservation string."""
+        return "projects/{project}/zones/{zone}/reservations/{reservation}".format(
+            project=project,
+            zone=zone,
+            reservation=reservation,
+        )
+
+    @staticmethod
+    def parse_reservation_path(path: str) -> Dict[str, str]:
+        """Parses a reservation path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/zones/(?P<zone>.+?)/reservations/(?P<reservation>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def subnetwork_path(
+        project: str,
+        region: str,
+        subnetwork: str,
+    ) -> str:
+        """Returns a fully-qualified subnetwork string."""
+        return "projects/{project}/regions/{region}/subnetworks/{subnetwork}".format(
+            project=project,
+            region=region,
+            subnetwork=subnetwork,
+        )
+
+    @staticmethod
+    def parse_subnetwork_path(path: str) -> Dict[str, str]:
+        """Parses a subnetwork path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/regions/(?P<region>.+?)/subnetworks/(?P<subnetwork>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Deprecated. Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+
+        warnings.warn(
+            "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
+            DeprecationWarning,
+        )
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+        use_client_cert = HypercomputeClusterClient._use_client_cert_effective()
+        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+        if use_mtls_endpoint not in ("auto", "never", "always"):
+            raise MutualTLSChannelError(
+                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+            )
+
+        # Figure out the client cert source to use.
+        client_cert_source = None
+        if use_client_cert:
+            if client_options.client_cert_source:
+                client_cert_source = client_options.client_cert_source
+            elif mtls.has_default_client_cert_source():
+                client_cert_source = mtls.default_client_cert_source()
+
+        # Figure out which api endpoint to use.
+        if client_options.api_endpoint is not None:
+            api_endpoint = client_options.api_endpoint
+        elif use_mtls_endpoint == "always" or (
+            use_mtls_endpoint == "auto" and client_cert_source
+        ):
+            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
+        else:
+            api_endpoint = cls.DEFAULT_ENDPOINT
+
+        return api_endpoint, client_cert_source
+
+    @staticmethod
+    def _read_environment_variables():
+        """Returns the environment variables used by the client.
+
+        Returns:
+            Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
+                GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
+
+        Raises:
+            ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
+                any of ["true", "false"].
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = HypercomputeClusterClient._use_client_cert_effective() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert, use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = HypercomputeClusterClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = HypercomputeClusterClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = HypercomputeClusterClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. 
+ """ + universe_domain = HypercomputeClusterClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + + # NOTE (b/349488459): universe validation is disabled until further notice. + return True + + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[ + str, + HypercomputeClusterTransport, + Callable[..., HypercomputeClusterTransport], + ] + ] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the hypercompute cluster client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Optional[Union[str,HypercomputeClusterTransport,Callable[..., HypercomputeClusterTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the HypercomputeClusterTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. 
Only if this property is not set and
+                ``transport`` was not explicitly provided, the endpoint is
+                determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+                variable, which has one of the following values:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto-switch to the
+                default mTLS endpoint if client certificate is present; this is
+                the default value).
+
+                2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide a client certificate for mTLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+
+                3. The ``universe_domain`` property can be used to override the
+                default "googleapis.com" universe. Note that the ``api_endpoint``
+                property still takes precedence; and ``universe_domain`` is
+                currently not supported for mTLS.
+
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        self._client_options = client_options
+        if isinstance(self._client_options, dict):
+            self._client_options = client_options_lib.from_dict(self._client_options)
+        if self._client_options is None:
+            self._client_options = client_options_lib.ClientOptions()
+        self._client_options = cast(
+            client_options_lib.ClientOptions, self._client_options
+        )
+
+        universe_domain_opt = getattr(self._client_options, "universe_domain", None)
+
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            HypercomputeClusterClient._read_environment_variables()
+        )
+        self._client_cert_source = HypercomputeClusterClient._get_client_cert_source(
+            self._client_options.client_cert_source, self._use_client_cert
+        )
+        self._universe_domain = HypercomputeClusterClient._get_universe_domain(
+            universe_domain_opt, self._universe_domain_env
+        )
+        self._api_endpoint = None  # updated below, depending on `transport`
+
+        # Initialize the universe domain validation.
+        self._is_universe_domain_valid = False
+
+        if CLIENT_LOGGING_SUPPORTED:  # pragma: NO COVER
+            # Setup logging.
+            client_logging.initialize_logging()
+
+        api_key_value = getattr(self._client_options, "api_key", None)
+        if api_key_value and credentials:
+            raise ValueError(
+                "client_options.api_key and credentials are mutually exclusive"
+            )
+
+        # Save or instantiate the transport.
+        # Ordinarily, we provide the transport, but allowing a custom transport
+        # instance provides an extensibility point for unusual situations.
+        transport_provided = isinstance(transport, HypercomputeClusterTransport)
+        if transport_provided:
+            # transport is a HypercomputeClusterTransport instance.
+            if credentials or self._client_options.credentials_file or api_key_value:
+                raise ValueError(
+                    "When providing a transport instance, "
+                    "provide its credentials directly."
+                )
+            if self._client_options.scopes:
+                raise ValueError(
+                    "When providing a transport instance, provide its scopes directly."
+ ) + self._transport = cast(HypercomputeClusterTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or HypercomputeClusterClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + transport_init: Union[ + Type[HypercomputeClusterTransport], + Callable[..., HypercomputeClusterTransport], + ] = ( + HypercomputeClusterClient.get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., HypercomputeClusterTransport], transport) + ) + # initialize with the provided callable or the passed in class + self._transport = transport_init( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.hypercomputecluster_v1.HypercomputeClusterClient`.", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "credentialsType": None, + }, + ) + + def list_clusters( + self, + request: Optional[Union[hypercompute_cluster.ListClustersRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListClustersPager: + r"""Lists Clusters in a given project and location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import hypercomputecluster_v1 + + def sample_list_clusters(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_clusters(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.hypercomputecluster_v1.types.ListClustersRequest, dict]): + The request object. Request message for + [ListClusters][google.cloud.hypercomputecluster.v1.HypercomputeCluster.ListClusters]. + parent (str): + Required. Parent location of the clusters to list, in + the format ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.hypercomputecluster_v1.services.hypercompute_cluster.pagers.ListClustersPager: + Response message for + [ListClusters][google.cloud.hypercomputecluster.v1.HypercomputeCluster.ListClusters]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, hypercompute_cluster.ListClustersRequest): + request = hypercompute_cluster.ListClustersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_clusters] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
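+        # (Iterating the pager re-sends the request with each returned
+        # ``next_page_token``; its ``pages`` attribute yields the raw
+        # per-page ``ListClustersResponse`` messages instead of items.)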
+        response = pagers.ListClustersPager(
+            method=rpc,
+            request=request,
+            response=response,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def get_cluster(
+        self,
+        request: Optional[Union[hypercompute_cluster.GetClusterRequest, dict]] = None,
+        *,
+        name: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> hypercompute_cluster.Cluster:
+        r"""Gets details of a single Cluster.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import hypercomputecluster_v1
+
+            def sample_get_cluster():
+                # Create a client
+                client = hypercomputecluster_v1.HypercomputeClusterClient()
+
+                # Initialize request argument(s)
+                request = hypercomputecluster_v1.GetClusterRequest(
+                    name="name_value",
+                )
+
+                # Make the request
+                response = client.get_cluster(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.hypercomputecluster_v1.types.GetClusterRequest, dict]):
+                The request object. Request message for
+                [GetCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.GetCluster].
+            name (str):
+                Required. Name of the cluster to retrieve, in the format
+                ``projects/{project}/locations/{location}/clusters/{cluster}``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.hypercomputecluster_v1.types.Cluster:
+                A collection of virtual machines and
+                connected resources forming a
+                high-performance computing cluster
+                capable of running large-scale, tightly
+                coupled workloads. A cluster combines a
+                set of compute resources that perform
+                computations, storage resources that
+                contain inputs and store outputs, an
+                orchestrator that is responsible for
+                assigning jobs to compute resources, and
+                network resources that connect
+                everything together.
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [name]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
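+        #   (``GetClusterRequest`` is a proto-plus message, so its constructor
+        #   also accepts a plain ``dict``, which is what makes the
+        #   ``Union[..., dict]`` request signature work.)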
+        if not isinstance(request, hypercompute_cluster.GetClusterRequest):
+            request = hypercompute_cluster.GetClusterRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if name is not None:
+                request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_cluster]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def create_cluster(
+        self,
+        request: Optional[
+            Union[hypercompute_cluster.CreateClusterRequest, dict]
+        ] = None,
+        *,
+        parent: Optional[str] = None,
+        cluster: Optional[hypercompute_cluster.Cluster] = None,
+        cluster_id: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> operation.Operation:
+        r"""Creates a new Cluster in a given project and
+        location.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import hypercomputecluster_v1
+
+            def sample_create_cluster():
+                # Create a client
+                client = hypercomputecluster_v1.HypercomputeClusterClient()
+
+                # Initialize request argument(s)
+                request = hypercomputecluster_v1.CreateClusterRequest(
+                    parent="parent_value",
+                    cluster_id="cluster_id_value",
+                )
+
+                # Make the request
+                operation = client.create_cluster(request=request)
+
+                print("Waiting for operation to complete...")
+
+                response = operation.result()
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.hypercomputecluster_v1.types.CreateClusterRequest, dict]):
+                The request object. Request message for
+                [CreateCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.CreateCluster].
+            parent (str):
+                Required. Parent location in which the cluster should be
+                created, in the format
+                ``projects/{project}/locations/{location}``.
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            cluster (google.cloud.hypercomputecluster_v1.types.Cluster):
+                Required. Cluster to create.
+                This corresponds to the ``cluster`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            cluster_id (str):
+                Required. ID of the cluster to create. Must conform to
+                `RFC-1034 <https://datatracker.ietf.org/doc/html/rfc1034>`__
+                (lower-case, alphanumeric, and at most 63 characters).
+
+                This corresponds to the ``cluster_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.hypercomputecluster_v1.types.Cluster` A collection of virtual machines and connected resources forming a
+                high-performance computing cluster capable of running
+                large-scale, tightly coupled workloads. A cluster
+                combines a set of compute resources that perform
+                computations, storage resources that contain inputs
+                and store outputs, an orchestrator that is
+                responsible for assigning jobs to compute resources,
+                and network resources that connect everything
+                together.
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [parent, cluster, cluster_id]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, hypercompute_cluster.CreateClusterRequest):
+            request = hypercompute_cluster.CreateClusterRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if parent is not None:
+                request.parent = parent
+            if cluster is not None:
+                request.cluster = cluster
+            if cluster_id is not None:
+                request.cluster_id = cluster_id
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.create_cluster]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            hypercompute_cluster.Cluster,
+            metadata_type=operation_metadata.OperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def update_cluster(
+        self,
+        request: Optional[
+            Union[hypercompute_cluster.UpdateClusterRequest, dict]
+        ] = None,
+        *,
+        cluster: Optional[hypercompute_cluster.Cluster] = None,
+        update_mask: Optional[field_mask_pb2.FieldMask] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> operation.Operation:
+        r"""Updates the parameters of a single Cluster.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import hypercomputecluster_v1
+
+            def sample_update_cluster():
+                # Create a client
+                client = hypercomputecluster_v1.HypercomputeClusterClient()
+
+                # Initialize request argument(s)
+                request = hypercomputecluster_v1.UpdateClusterRequest(
+                )
+
+                # Make the request
+                operation = client.update_cluster(request=request)
+
+                print("Waiting for operation to complete...")
+
+                response = operation.result()
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.hypercomputecluster_v1.types.UpdateClusterRequest, dict]):
+                The request object. Request message for
+                [UpdateCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.UpdateCluster].
+            cluster (google.cloud.hypercomputecluster_v1.types.Cluster):
+                Required. Cluster to update.
+                This corresponds to the ``cluster`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            update_mask (google.protobuf.field_mask_pb2.FieldMask):
+                Optional. Mask specifying which
+                fields in the cluster to update. All
+                paths must be specified explicitly -
+                wildcards are not supported. At least
+                one path must be provided.
+
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.hypercomputecluster_v1.types.Cluster` A collection of virtual machines and connected resources forming a
+                high-performance computing cluster capable of running
+                large-scale, tightly coupled workloads. A cluster
+                combines a set of compute resources that perform
+                computations, storage resources that contain inputs
+                and store outputs, an orchestrator that is
+                responsible for assigning jobs to compute resources,
+                and network resources that connect everything
+                together.
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [cluster, update_mask]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, hypercompute_cluster.UpdateClusterRequest):
+            request = hypercompute_cluster.UpdateClusterRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
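+            # Editor's note (illustrative, not part of the generated code): with
+            # the flattened arguments a caller would typically write
+            #     mask = field_mask_pb2.FieldMask(paths=["labels"])
+            #     op = client.update_cluster(cluster=my_cluster, update_mask=mask)
+            #     result = op.result()  # blocks until the long-running update finishes
+            # where `client` and `my_cluster` are hypothetical, pre-built objects.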
+ if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("cluster.name", request.cluster.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + hypercompute_cluster.Cluster, + metadata_type=operation_metadata.OperationMetadata, + ) + + # Done; return the response. + return response + + def delete_cluster( + self, + request: Optional[ + Union[hypercompute_cluster.DeleteClusterRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Deletes a single Cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import hypercomputecluster_v1 + + def sample_delete_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.hypercomputecluster_v1.types.DeleteClusterRequest, dict]): + The request object. Request message for + [DeleteCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.DeleteCluster]. + name (str): + Required. Name of the cluster to delete, in the format + ``projects/{project}/locations/{location}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, hypercompute_cluster.DeleteClusterRequest): + request = hypercompute_cluster.DeleteClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operation_metadata.OperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "HypercomputeClusterClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
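+        # Editor's note (illustrative only; the resource name is hypothetical):
+        # a dict such as {"name": "projects/my-project/locations/-"} is expanded
+        # via keyword arguments into operations_pb2.ListOperationsRequest below.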
+ if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_operations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_location] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_locations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + +__all__ = ("HypercomputeClusterClient",) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/pagers.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/pagers.py new file mode 100644 index 000000000000..dce7d30c104c --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/pagers.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Iterator, + Optional, + Sequence, + Tuple, + Union, +) + +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import retry_async as retries_async + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] + OptionalAsyncRetry = Union[ + retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None + ] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore + +from google.cloud.hypercomputecluster_v1.types import hypercompute_cluster + + +class ListClustersPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`google.cloud.hypercomputecluster_v1.types.ListClustersResponse` object, and + provides an ``__iter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`google.cloud.hypercomputecluster_v1.types.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., hypercompute_cluster.ListClustersResponse], + request: hypercompute_cluster.ListClustersRequest, + response: hypercompute_cluster.ListClustersResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.hypercomputecluster_v1.types.ListClustersRequest): + The initial request object. + response (google.cloud.hypercomputecluster_v1.types.ListClustersResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = hypercompute_cluster.ListClustersRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[hypercompute_cluster.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[hypercompute_cluster.Cluster]: + for page in self.pages: + yield from page.clusters + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListClustersAsyncPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`google.cloud.hypercomputecluster_v1.types.ListClustersResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`google.cloud.hypercomputecluster_v1.types.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[hypercompute_cluster.ListClustersResponse]], + request: hypercompute_cluster.ListClustersRequest, + response: hypercompute_cluster.ListClustersResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.hypercomputecluster_v1.types.ListClustersRequest): + The initial request object. + response (google.cloud.hypercomputecluster_v1.types.ListClustersResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
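+
+        Example (editor's note, illustrative; assumes an async ``client`` and a
+        prepared ``request``, neither of which is defined here):
+
+        .. code-block:: python
+
+            pager = await client.list_clusters(request=request)
+            async for cluster in pager:
+                print(cluster.name)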
+ """ + self._method = method + self._request = hypercompute_cluster.ListClustersRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[hypercompute_cluster.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[hypercompute_cluster.Cluster]: + async def async_generator(): + async for page in self.pages: + for response in page.clusters: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/README.rst b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/README.rst new file mode 100644 index 000000000000..d2bd7a88e4be --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/README.rst @@ -0,0 +1,9 @@ + +transport inheritance structure +_______________________________ + +`HypercomputeClusterTransport` is the ABC for all transports. +- public child `HypercomputeClusterGrpcTransport` for sync gRPC transport (defined in `grpc.py`). +- public child `HypercomputeClusterGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). +- private child `_BaseHypercomputeClusterRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). +- public child `HypercomputeClusterRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/__init__.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/__init__.py new file mode 100644 index 000000000000..10f56f362bc3 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import HypercomputeClusterTransport +from .grpc import HypercomputeClusterGrpcTransport +from .grpc_asyncio import HypercomputeClusterGrpcAsyncIOTransport +from .rest import HypercomputeClusterRestInterceptor, HypercomputeClusterRestTransport + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[HypercomputeClusterTransport]] +_transport_registry["grpc"] = HypercomputeClusterGrpcTransport +_transport_registry["grpc_asyncio"] = HypercomputeClusterGrpcAsyncIOTransport +_transport_registry["rest"] = HypercomputeClusterRestTransport + +__all__ = ( + "HypercomputeClusterTransport", + "HypercomputeClusterGrpcTransport", + "HypercomputeClusterGrpcAsyncIOTransport", + "HypercomputeClusterRestTransport", + "HypercomputeClusterRestInterceptor", +) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/base.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/base.py new file mode 100644 index 000000000000..813187d28473 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/base.py @@ -0,0 +1,343 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +import google.api_core +import google.auth # type: ignore +import google.protobuf +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1, operations_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.hypercomputecluster_v1 import gapic_version as package_version +from google.cloud.hypercomputecluster_v1.types import hypercompute_cluster + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class HypercomputeClusterTransport(abc.ABC): + """Abstract transport class for HypercomputeCluster.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "hypercomputecluster.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'hypercomputecluster.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the scopes. + self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False + + # If no credentials are provided, then determine the appropriate + # defaults. 
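+        # Editor's note on the resolution order below: explicit `credentials`
+        # take precedence; otherwise `credentials_file` (deprecated) is loaded
+        # via google.auth.load_credentials_from_file(); otherwise Application
+        # Default Credentials are discovered with google.auth.default().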
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs(
+                "'credentials_file' and 'credentials' are mutually exclusive"
+            )
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                credentials_file,
+                scopes=scopes,
+                quota_project_id=quota_project_id,
+                default_scopes=self.AUTH_SCOPES,
+            )
+        elif credentials is None and not self._ignore_credentials:
+            credentials, _ = google.auth.default(
+                scopes=scopes,
+                quota_project_id=quota_project_id,
+                default_scopes=self.AUTH_SCOPES,
+            )
+            # Don't apply the audience if a credentials file was passed by the user.
+            if hasattr(credentials, "with_gdch_audience"):
+                credentials = credentials.with_gdch_audience(
+                    api_audience if api_audience else host
+                )
+
+        # If the credentials are service account credentials, then always try to use self signed JWT.
+        if (
+            always_use_jwt_access
+            and isinstance(credentials, service_account.Credentials)
+            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
+        ):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+    @property
+    def host(self):
+        return self._host
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+        self._wrapped_methods = {
+            self.list_clusters: gapic_v1.method.wrap_method(
+                self.list_clusters,
+                default_retry=retries.Retry(
+                    initial=1.0,
+                    maximum=10.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        core_exceptions.ServiceUnavailable,
+                    ),
+                    deadline=60.0,
+                ),
+                default_timeout=60.0,
+                client_info=client_info,
+            ),
+            self.get_cluster: gapic_v1.method.wrap_method(
+                self.get_cluster,
+                default_retry=retries.Retry(
+                    initial=1.0,
+                    maximum=10.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        core_exceptions.ServiceUnavailable,
+                    ),
+                    deadline=60.0,
+                ),
+                default_timeout=60.0,
+                client_info=client_info,
+            ),
+            self.create_cluster: gapic_v1.method.wrap_method(
+                self.create_cluster,
+                default_timeout=3600.0,
+                client_info=client_info,
+            ),
+            self.update_cluster: gapic_v1.method.wrap_method(
+                self.update_cluster,
+                default_timeout=3600.0,
+                client_info=client_info,
+            ),
+            self.delete_cluster: gapic_v1.method.wrap_method(
+                self.delete_cluster,
+                default_timeout=3600.0,
+                client_info=client_info,
+            ),
+            self.get_location: gapic_v1.method.wrap_method(
+                self.get_location,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.list_locations: gapic_v1.method.wrap_method(
+                self.list_locations,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.cancel_operation: gapic_v1.method.wrap_method(
+                self.cancel_operation,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.delete_operation: gapic_v1.method.wrap_method(
+                self.delete_operation,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.get_operation: gapic_v1.method.wrap_method(
+                self.get_operation,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.list_operations: gapic_v1.method.wrap_method(
+                self.list_operations,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+        }
+
+    def close(self):
+        """Closes resources associated with the transport.
+
+        .. warning::
+             Only call this method if the transport is NOT shared
+             with other clients - this may cause errors in other clients!
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def list_clusters( + self, + ) -> Callable[ + [hypercompute_cluster.ListClustersRequest], + Union[ + hypercompute_cluster.ListClustersResponse, + Awaitable[hypercompute_cluster.ListClustersResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.GetClusterRequest], + Union[hypercompute_cluster.Cluster, Awaitable[hypercompute_cluster.Cluster]], + ]: + raise NotImplementedError() + + @property + def create_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.CreateClusterRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def update_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.UpdateClusterRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.DeleteClusterRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("HypercomputeClusterTransport",) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/grpc.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/grpc.py new file mode 100644 index 000000000000..338c82b6fe17 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/grpc.py @@ -0,0 +1,599 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import json
+import logging as std_logging
+import pickle
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+import google.auth  # type: ignore
+import google.protobuf.message
+import grpc  # type: ignore
+import proto  # type: ignore
+from google.api_core import gapic_v1, grpc_helpers, operations_v1
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.cloud.location import locations_pb2  # type: ignore
+from google.longrunning import operations_pb2  # type: ignore
+from google.protobuf.json_format import MessageToJson
+
+from google.cloud.hypercomputecluster_v1.types import hypercompute_cluster
+
+from .base import DEFAULT_CLIENT_INFO, HypercomputeClusterTransport
+
+try:
+    from google.api_core import client_logging  # type: ignore
+
+    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
+except ImportError:  # pragma: NO COVER
+    CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
+
+class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor):  # pragma: NO COVER
+    def intercept_unary_unary(self, continuation, client_call_details, request):
+        logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+            std_logging.DEBUG
+        )
+        if logging_enabled:  # pragma: NO COVER
+            request_metadata = client_call_details.metadata
+            if isinstance(request, proto.Message):
+                request_payload = type(request).to_json(request)
+            elif isinstance(request, google.protobuf.message.Message):
+                request_payload = MessageToJson(request)
+            else:
+                request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
+
+            request_metadata = {
+                key: value.decode("utf-8") if isinstance(value, bytes) else value
+                for key, value in request_metadata
+            }
+            grpc_request = {
+                "payload": request_payload,
+                "requestMethod": "grpc",
+                "metadata": dict(request_metadata),
+            }
+            _LOGGER.debug(
+                f"Sending request for {client_call_details.method}",
+                extra={
+                    "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster",
+                    "rpcName": str(client_call_details.method),
+                    "request": grpc_request,
+                    "metadata": grpc_request["metadata"],
+                },
+            )
+        response = continuation(client_call_details, request)
+        if logging_enabled:  # pragma: NO COVER
+            response_metadata = response.trailing_metadata()
+            # Convert the gRPC trailing metadata to a dict of string key/value pairs.
+            metadata = (
+                dict([(k, str(v)) for k, v in response_metadata])
+                if response_metadata
+                else None
+            )
+            result = response.result()
+            if isinstance(result, proto.Message):
+                response_payload = type(result).to_json(result)
+            elif isinstance(result, google.protobuf.message.Message):
+                response_payload = MessageToJson(result)
+            else:
+                response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
+            grpc_response = {
+                "payload": response_payload,
+                "metadata": metadata,
+                "status": "OK",
+            }
+            _LOGGER.debug(
+                f"Received response for {client_call_details.method}.",
+                extra={
+                    "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster",
+                    "rpcName": client_call_details.method,
+                    "response":
grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + + +class HypercomputeClusterGrpcTransport(HypercomputeClusterTransport): + """gRPC backend transport for HypercomputeCluster. + + Service describing handlers for resources + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "hypercomputecluster.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'hypercomputecluster.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if a ``channel`` instance is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. 
It is + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if isinstance(channel, grpc.Channel): + # Ignore credentials if a channel was passed. + credentials = None + self._ignore_credentials = True + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. 
This must be done after self._logged_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "hypercomputecluster.googleapis.com",
+        credentials: Optional[ga_credentials.Credentials] = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): Deprecated. A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials. This argument will be
+                removed in the next major version of this library.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service."""
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Quick check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self._logged_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def list_clusters(
+        self,
+    ) -> Callable[
+        [hypercompute_cluster.ListClustersRequest],
+        hypercompute_cluster.ListClustersResponse,
+    ]:
+        r"""Return a callable for the list clusters method over gRPC.
+
+        Lists Clusters in a given project and location.
+
+        Returns:
+            Callable[[~.ListClustersRequest],
+                    ~.ListClustersResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
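+        # Editor's note: the stub is built once, cached in self._stubs, and
+        # reused on every later property access; e.g. (illustrative only)
+        #     response = transport.list_clusters(request)
+        # invokes the cached callable directly.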
+ if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self._logged_channel.unary_unary( + "/google.cloud.hypercomputecluster.v1.HypercomputeCluster/ListClusters", + request_serializer=hypercompute_cluster.ListClustersRequest.serialize, + response_deserializer=hypercompute_cluster.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def get_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.GetClusterRequest], hypercompute_cluster.Cluster + ]: + r"""Return a callable for the get cluster method over gRPC. + + Gets details of a single Cluster. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.hypercomputecluster.v1.HypercomputeCluster/GetCluster", + request_serializer=hypercompute_cluster.GetClusterRequest.serialize, + response_deserializer=hypercompute_cluster.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def create_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.CreateClusterRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a new Cluster in a given project and + location. + + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.hypercomputecluster.v1.HypercomputeCluster/CreateCluster", + request_serializer=hypercompute_cluster.CreateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_cluster"] + + @property + def update_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.UpdateClusterRequest], operations_pb2.Operation + ]: + r"""Return a callable for the update cluster method over gRPC. + + Updates the parameters of a single Cluster. + + Returns: + Callable[[~.UpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.hypercomputecluster.v1.HypercomputeCluster/UpdateCluster", + request_serializer=hypercompute_cluster.UpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_cluster"] + + @property + def delete_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.DeleteClusterRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a single Cluster. 
+ + Returns: + Callable[[~.DeleteClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.hypercomputecluster.v1.HypercomputeCluster/DeleteCluster", + request_serializer=hypercompute_cluster.DeleteClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_cluster"] + + def close(self): + self._logged_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
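+ # Illustrative sketch (values assumed): the returned callable pages + # through long-running operations for a location, e.g. + # + #     req = operations_pb2.ListOperationsRequest( + #         name="projects/my-project/locations/us-central1", + #     ) + #     resp = transport.list_operations(req)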
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("HypercomputeClusterGrpcTransport",) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/grpc_asyncio.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/grpc_asyncio.py new file mode 100644 index 000000000000..8ea533019260 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/grpc_asyncio.py @@ -0,0 +1,691 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import inspect +import json +import logging as std_logging +import pickle +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +import google.protobuf.message +import grpc # type: ignore +import proto # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1, grpc_helpers_async, operations_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf.json_format import MessageToJson +from grpc.experimental import aio # type: ignore + +from google.cloud.hypercomputecluster_v1.types import hypercompute_cluster + +from .base import DEFAULT_CLIENT_INFO, HypercomputeClusterTransport +from .grpc import HypercomputeClusterGrpcTransport + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert the gRPC response metadata into a dict of strings + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + + +class HypercomputeClusterGrpcAsyncIOTransport(HypercomputeClusterTransport):
"""gRPC AsyncIO backend transport for HypercomputeCluster. + + Service describing handlers for resources + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "hypercomputecluster.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "hypercomputecluster.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'hypercomputecluster.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if a ``channel`` instance is provided. 
+ This argument will be removed in the next major version of this library. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if a ``channel`` instance is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if isinstance(channel, aio.Channel): + # Ignore credentials if a channel was passed. + credentials = None + self._ignore_credentials = True + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials.
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) + # Wrap messages. This must be done after self._logged_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self._logged_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def list_clusters( + self, + ) -> Callable[ + [hypercompute_cluster.ListClustersRequest], + Awaitable[hypercompute_cluster.ListClustersResponse], + ]: + r"""Return a callable for the list clusters method over gRPC. + + Lists Clusters in a given project and location. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
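+ # Hedged sketch (names assumed), awaited from an async context: + # + #     response = await transport.list_clusters( + #         hypercompute_cluster.ListClustersRequest(parent=parent) + #     )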
+ if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self._logged_channel.unary_unary( + "/google.cloud.hypercomputecluster.v1.HypercomputeCluster/ListClusters", + request_serializer=hypercompute_cluster.ListClustersRequest.serialize, + response_deserializer=hypercompute_cluster.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def get_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.GetClusterRequest], + Awaitable[hypercompute_cluster.Cluster], + ]: + r"""Return a callable for the get cluster method over gRPC. + + Gets details of a single Cluster. + + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.hypercomputecluster.v1.HypercomputeCluster/GetCluster", + request_serializer=hypercompute_cluster.GetClusterRequest.serialize, + response_deserializer=hypercompute_cluster.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def create_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.CreateClusterRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a new Cluster in a given project and + location. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.hypercomputecluster.v1.HypercomputeCluster/CreateCluster", + request_serializer=hypercompute_cluster.CreateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_cluster"] + + @property + def update_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.UpdateClusterRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the update cluster method over gRPC. + + Updates the parameters of a single Cluster. + + Returns: + Callable[[~.UpdateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.hypercomputecluster.v1.HypercomputeCluster/UpdateCluster", + request_serializer=hypercompute_cluster.UpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_cluster"] + + @property + def delete_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.DeleteClusterRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a single Cluster. 
+ + Returns: + Callable[[~.DeleteClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.hypercomputecluster.v1.HypercomputeCluster/DeleteCluster", + request_serializer=hypercompute_cluster.DeleteClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_cluster"] + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.list_clusters: self._wrap_method( + self.list_clusters, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_cluster: self._wrap_method( + self.get_cluster, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_cluster: self._wrap_method( + self.create_cluster, + default_timeout=3600.0, + client_info=client_info, + ), + self.update_cluster: self._wrap_method( + self.update_cluster, + default_timeout=3600.0, + client_info=client_info, + ), + self.delete_cluster: self._wrap_method( + self.delete_cluster, + default_timeout=3600.0, + client_info=client_info, + ), + self.get_location: self._wrap_method( + self.get_location, + default_timeout=None, + client_info=client_info, + ), + self.list_locations: self._wrap_method( + self.list_locations, + default_timeout=None, + client_info=client_info, + ), + self.cancel_operation: self._wrap_method( + self.cancel_operation, + default_timeout=None, + client_info=client_info, + ), + self.delete_operation: self._wrap_method( + self.delete_operation, + default_timeout=None, + client_info=client_info, + ), + self.get_operation: self._wrap_method( + self.get_operation, + default_timeout=None, + client_info=client_info, + ), + self.list_operations: self._wrap_method( + self.list_operations, + default_timeout=None, + client_info=client_info, + ), + } + + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + + def close(self): + return self._logged_channel.close() + + @property + def kind(self) -> str: + return "grpc_asyncio" + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + +__all__ = ("HypercomputeClusterGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/rest.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/rest.py new file mode 100644 index 000000000000..983c7b090dd1 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/rest.py @@ -0,0 +1,2256 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import dataclasses +import json # type: ignore +import logging +import warnings +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + +import google.protobuf +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1, operations_v1, rest_helpers, rest_streaming +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import json_format +from requests import __version__ as requests_version + +from google.cloud.hypercomputecluster_v1.types import hypercompute_cluster + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO +from .rest_base import _BaseHypercomputeClusterRestTransport + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=f"requests@{requests_version}", +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class HypercomputeClusterRestInterceptor: + """Interceptor for HypercomputeCluster. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+ Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the HypercomputeClusterRestTransport. + + .. code-block:: python + + class MyCustomHypercomputeClusterInterceptor(HypercomputeClusterRestInterceptor): + def pre_create_cluster(self, request, metadata): + logging.info(f"Received request: {request}") + return request, metadata + + def post_create_cluster(self, response): + logging.info(f"Received response: {response}") + return response + + def pre_delete_cluster(self, request, metadata): + logging.info(f"Received request: {request}") + return request, metadata + + def post_delete_cluster(self, response): + logging.info(f"Received response: {response}") + return response + + def pre_get_cluster(self, request, metadata): + logging.info(f"Received request: {request}") + return request, metadata + + def post_get_cluster(self, response): + logging.info(f"Received response: {response}") + return response + + def pre_list_clusters(self, request, metadata): + logging.info(f"Received request: {request}") + return request, metadata + + def post_list_clusters(self, response): + logging.info(f"Received response: {response}") + return response + + def pre_update_cluster(self, request, metadata): + logging.info(f"Received request: {request}") + return request, metadata + + def post_update_cluster(self, response): + logging.info(f"Received response: {response}") + return response + + transport = HypercomputeClusterRestTransport(interceptor=MyCustomHypercomputeClusterInterceptor()) + client = HypercomputeClusterClient(transport=transport) + + """ + + def pre_create_cluster( + self, + request: hypercompute_cluster.CreateClusterRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + hypercompute_cluster.CreateClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for create_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the HypercomputeCluster server. + """ + return request, metadata + + def post_create_cluster( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_cluster + + DEPRECATED. Please use the `post_create_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the HypercomputeCluster server but before + it is returned to user code. This `post_create_cluster` interceptor runs + before the `post_create_cluster_with_metadata` interceptor. + """ + return response + + def post_create_cluster_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the HypercomputeCluster server but before it is returned to user code. + + We recommend only using this `post_create_cluster_with_metadata` + interceptor in new development instead of the `post_create_cluster` interceptor. + When both interceptors are used, this `post_create_cluster_with_metadata` interceptor runs after the + `post_create_cluster` interceptor.
The (possibly modified) response returned by + `post_create_cluster` will be passed to + `post_create_cluster_with_metadata`. + """ + return response, metadata + + def pre_delete_cluster( + self, + request: hypercompute_cluster.DeleteClusterRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + hypercompute_cluster.DeleteClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for delete_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the HypercomputeCluster server. + """ + return request, metadata + + def post_delete_cluster( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_cluster + + DEPRECATED. Please use the `post_delete_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the HypercomputeCluster server but before + it is returned to user code. This `post_delete_cluster` interceptor runs + before the `post_delete_cluster_with_metadata` interceptor. + """ + return response + + def post_delete_cluster_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the HypercomputeCluster server but before it is returned to user code. + + We recommend only using this `post_delete_cluster_with_metadata` + interceptor in new development instead of the `post_delete_cluster` interceptor. + When both interceptors are used, this `post_delete_cluster_with_metadata` interceptor runs after the + `post_delete_cluster` interceptor. The (possibly modified) response returned by + `post_delete_cluster` will be passed to + `post_delete_cluster_with_metadata`. + """ + return response, metadata + + def pre_get_cluster( + self, + request: hypercompute_cluster.GetClusterRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + hypercompute_cluster.GetClusterRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the HypercomputeCluster server. + """ + return request, metadata + + def post_get_cluster( + self, response: hypercompute_cluster.Cluster + ) -> hypercompute_cluster.Cluster: + """Post-rpc interceptor for get_cluster + + DEPRECATED. Please use the `post_get_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the HypercomputeCluster server but before + it is returned to user code. This `post_get_cluster` interceptor runs + before the `post_get_cluster_with_metadata` interceptor. + """ + return response + + def post_get_cluster_with_metadata( + self, + response: hypercompute_cluster.Cluster, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[hypercompute_cluster.Cluster, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the HypercomputeCluster server but before it is returned to user code. 
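+ + For example, a minimal sketch (the subclass name is illustrative): + + .. code-block:: python + + class MyInterceptor(HypercomputeClusterRestInterceptor): + def post_get_cluster_with_metadata(self, response, metadata): + # e.g. inspect the response headers surfaced as metadata + return response, metadata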
+ + We recommend only using this `post_get_cluster_with_metadata` + interceptor in new development instead of the `post_get_cluster` interceptor. + When both interceptors are used, this `post_get_cluster_with_metadata` interceptor runs after the + `post_get_cluster` interceptor. The (possibly modified) response returned by + `post_get_cluster` will be passed to + `post_get_cluster_with_metadata`. + """ + return response, metadata + + def pre_list_clusters( + self, + request: hypercompute_cluster.ListClustersRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + hypercompute_cluster.ListClustersRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_clusters + + Override in a subclass to manipulate the request or metadata + before they are sent to the HypercomputeCluster server. + """ + return request, metadata + + def post_list_clusters( + self, response: hypercompute_cluster.ListClustersResponse + ) -> hypercompute_cluster.ListClustersResponse: + """Post-rpc interceptor for list_clusters + + DEPRECATED. Please use the `post_list_clusters_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the HypercomputeCluster server but before + it is returned to user code. This `post_list_clusters` interceptor runs + before the `post_list_clusters_with_metadata` interceptor. + """ + return response + + def post_list_clusters_with_metadata( + self, + response: hypercompute_cluster.ListClustersResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + hypercompute_cluster.ListClustersResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_clusters + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the HypercomputeCluster server but before it is returned to user code. + + We recommend only using this `post_list_clusters_with_metadata` + interceptor in new development instead of the `post_list_clusters` interceptor. + When both interceptors are used, this `post_list_clusters_with_metadata` interceptor runs after the + `post_list_clusters` interceptor. The (possibly modified) response returned by + `post_list_clusters` will be passed to + `post_list_clusters_with_metadata`. + """ + return response, metadata + + def pre_update_cluster( + self, + request: hypercompute_cluster.UpdateClusterRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + hypercompute_cluster.UpdateClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the HypercomputeCluster server. + """ + return request, metadata + + def post_update_cluster( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_cluster + + DEPRECATED. Please use the `post_update_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the HypercomputeCluster server but before + it is returned to user code. This `post_update_cluster` interceptor runs + before the `post_update_cluster_with_metadata` interceptor. 
+ """ + return response + + def post_update_cluster_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the HypercomputeCluster server but before it is returned to user code. + + We recommend only using this `post_update_cluster_with_metadata` + interceptor in new development instead of the `post_update_cluster` interceptor. + When both interceptors are used, this `post_update_cluster_with_metadata` interceptor runs after the + `post_update_cluster` interceptor. The (possibly modified) response returned by + `post_update_cluster` will be passed to + `post_update_cluster_with_metadata`. + """ + return response, metadata + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.GetLocationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the HypercomputeCluster server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the HypercomputeCluster server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.ListLocationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the HypercomputeCluster server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the HypercomputeCluster server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the HypercomputeCluster server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the HypercomputeCluster server but before + it is returned to user code. 
+ """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the HypercomputeCluster server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the HypercomputeCluster server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the HypercomputeCluster server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the HypercomputeCluster server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the HypercomputeCluster server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the HypercomputeCluster server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class HypercomputeClusterRestStub: + _session: AuthorizedSession + _host: str + _interceptor: HypercomputeClusterRestInterceptor + + +class HypercomputeClusterRestTransport(_BaseHypercomputeClusterRestTransport): + """REST backend synchronous transport for HypercomputeCluster. + + Service describing handlers for resources + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "hypercomputecluster.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[HypercomputeClusterRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to (default: 'hypercomputecluster.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or HypercomputeClusterRestInterceptor() + self._prep_wrapped_messages(client_info) + + @property + def operations_client(self) -> operations_v1.AbstractOperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one.
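+ # The mapping below pairs each google.longrunning RPC with its REST + # binding under the "v1" path prefix (for example, GetOperation maps to + # GET /v1/{name=projects/*/locations/*/operations/*}).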
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + "body": "*", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. + return self._operations_client + + class _CreateCluster( + _BaseHypercomputeClusterRestTransport._BaseCreateCluster, + HypercomputeClusterRestStub, + ): + def __hash__(self): + return hash("HypercomputeClusterRestTransport.CreateCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: hypercompute_cluster.CreateClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create cluster method over HTTP. + + Args: + request (~.hypercompute_cluster.CreateClusterRequest): + The request object. Request message for + [CreateCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.CreateCluster]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
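+ + As a hedged sketch (names illustrative), the returned raw operation + can be polled through this transport's ``operations_client``: + + .. code-block:: python + + op = transport.create_cluster(request) + latest = transport.operations_client.get_operation(name=op.name)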
+ + """ + + http_options = _BaseHypercomputeClusterRestTransport._BaseCreateCluster._get_http_options() + + request, metadata = self._interceptor.pre_create_cluster(request, metadata) + transcoded_request = _BaseHypercomputeClusterRestTransport._BaseCreateCluster._get_transcoded_request( + http_options, request + ) + + body = _BaseHypercomputeClusterRestTransport._BaseCreateCluster._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseHypercomputeClusterRestTransport._BaseCreateCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.CreateCluster", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "CreateCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = HypercomputeClusterRestTransport._CreateCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.create_cluster", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "CreateCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteCluster( + _BaseHypercomputeClusterRestTransport._BaseDeleteCluster, + HypercomputeClusterRestStub, + ): + def __hash__(self): + return hash("HypercomputeClusterRestTransport.DeleteCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: hypercompute_cluster.DeleteClusterRequest, + 
*, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete cluster method over HTTP. + + Args: + request (~.hypercompute_cluster.DeleteClusterRequest): + The request object. Request message for + [DeleteCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.DeleteCluster]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = _BaseHypercomputeClusterRestTransport._BaseDeleteCluster._get_http_options() + + request, metadata = self._interceptor.pre_delete_cluster(request, metadata) + transcoded_request = _BaseHypercomputeClusterRestTransport._BaseDeleteCluster._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseHypercomputeClusterRestTransport._BaseDeleteCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.DeleteCluster", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "DeleteCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = HypercomputeClusterRestTransport._DeleteCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
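+            # A hedged sketch of the mapping below (assuming standard
+            # google-api-core behavior): from_http_response() parses the JSON
+            # error body and selects the exception subclass by status code, so a
+            # 404 surfaces as google.api_core.exceptions.NotFound and a 403 as
+            # PermissionDenied; callers may catch the specific class or the
+            # common base GoogleAPICallError.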
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            resp = operations_pb2.Operation()
+            json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+            resp = self._interceptor.post_delete_cluster(resp)
+            response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+            resp, _ = self._interceptor.post_delete_cluster_with_metadata(
+                resp, response_metadata
+            )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                try:
+                    response_payload = json_format.MessageToJson(resp)
+                except:
+                    response_payload = None
+                http_response = {
+                    "payload": response_payload,
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.delete_cluster",
+                    extra={
+                        "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster",
+                        "rpcName": "DeleteCluster",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
+
+    class _GetCluster(
+        _BaseHypercomputeClusterRestTransport._BaseGetCluster,
+        HypercomputeClusterRestStub,
+    ):
+        def __hash__(self):
+            return hash("HypercomputeClusterRestTransport.GetCluster")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+            )
+            return response
+
+        def __call__(
+            self,
+            request: hypercompute_cluster.GetClusterRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> hypercompute_cluster.Cluster:
+            r"""Call the get cluster method over HTTP.
+
+            Args:
+                request (~.hypercompute_cluster.GetClusterRequest):
+                    The request object. Request message for
+                [GetCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.GetCluster].
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+
+            Returns:
+                ~.hypercompute_cluster.Cluster:
+                    A collection of virtual machines and
+                connected resources forming a
+                high-performance computing cluster
+                capable of running large-scale, tightly
+                coupled workloads. A cluster combines a
+                set of compute resources that perform
+                computations, storage resources that
+                contain inputs and store outputs, an
+                orchestrator that is responsible for
+                assigning jobs to compute resources, and
+                network resources that connect
+                everything together.
+ + """ + + http_options = _BaseHypercomputeClusterRestTransport._BaseGetCluster._get_http_options() + + request, metadata = self._interceptor.pre_get_cluster(request, metadata) + transcoded_request = _BaseHypercomputeClusterRestTransport._BaseGetCluster._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseHypercomputeClusterRestTransport._BaseGetCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.GetCluster", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "GetCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = HypercomputeClusterRestTransport._GetCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = hypercompute_cluster.Cluster() + pb_resp = hypercompute_cluster.Cluster.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = hypercompute_cluster.Cluster.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.get_cluster", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "GetCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListClusters( + _BaseHypercomputeClusterRestTransport._BaseListClusters, + HypercomputeClusterRestStub, + ): + def __hash__(self): + return hash("HypercomputeClusterRestTransport.ListClusters") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: hypercompute_cluster.ListClustersRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, 
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> hypercompute_cluster.ListClustersResponse: + r"""Call the list clusters method over HTTP. + + Args: + request (~.hypercompute_cluster.ListClustersRequest): + The request object. Request message for + [ListClusters][google.cloud.hypercomputecluster.v1.HypercomputeCluster.ListClusters]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.hypercompute_cluster.ListClustersResponse: + Response message for + [ListClusters][google.cloud.hypercomputecluster.v1.HypercomputeCluster.ListClusters]. + + """ + + http_options = _BaseHypercomputeClusterRestTransport._BaseListClusters._get_http_options() + + request, metadata = self._interceptor.pre_list_clusters(request, metadata) + transcoded_request = _BaseHypercomputeClusterRestTransport._BaseListClusters._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseHypercomputeClusterRestTransport._BaseListClusters._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.ListClusters", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "ListClusters", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = HypercomputeClusterRestTransport._ListClusters._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
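+            # Note that this raw stub returns exactly one page of results;
+            # iterating via response.next_page_token is layered on by the
+            # client's pager (pagers.ListClustersPager in generated clients),
+            # never by the transport. A hedged usage sketch with example values:
+            #
+            #     page = stub(hypercompute_cluster.ListClustersRequest(
+            #         parent="projects/my-project/locations/us-central1"))
+            #     next_token = page.next_page_token  # empty string when exhausted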
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = hypercompute_cluster.ListClustersResponse() + pb_resp = hypercompute_cluster.ListClustersResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_clusters(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_clusters_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + hypercompute_cluster.ListClustersResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.list_clusters", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "ListClusters", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _UpdateCluster( + _BaseHypercomputeClusterRestTransport._BaseUpdateCluster, + HypercomputeClusterRestStub, + ): + def __hash__(self): + return hash("HypercomputeClusterRestTransport.UpdateCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: hypercompute_cluster.UpdateClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the update cluster method over HTTP. + + Args: + request (~.hypercompute_cluster.UpdateClusterRequest): + The request object. Request message for + [UpdateCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.UpdateCluster]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = _BaseHypercomputeClusterRestTransport._BaseUpdateCluster._get_http_options() + + request, metadata = self._interceptor.pre_update_cluster(request, metadata) + transcoded_request = _BaseHypercomputeClusterRestTransport._BaseUpdateCluster._get_transcoded_request( + http_options, request + ) + + body = _BaseHypercomputeClusterRestTransport._BaseUpdateCluster._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseHypercomputeClusterRestTransport._BaseUpdateCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.UpdateCluster", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "UpdateCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = HypercomputeClusterRestTransport._UpdateCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_update_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.update_cluster", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "UpdateCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + @property + def create_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.CreateClusterRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.DeleteClusterRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._DeleteCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.GetClusterRequest], hypercompute_cluster.Cluster + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_clusters( + self, + ) -> Callable[ + [hypercompute_cluster.ListClustersRequest], + hypercompute_cluster.ListClustersResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListClusters(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_cluster( + self, + ) -> Callable[ + [hypercompute_cluster.UpdateClusterRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation( + _BaseHypercomputeClusterRestTransport._BaseGetLocation, + HypercomputeClusterRestStub, + ): + def __hash__(self): + return hash("HypercomputeClusterRestTransport.GetLocation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options = _BaseHypercomputeClusterRestTransport._BaseGetLocation._get_http_options() + + request, metadata = self._interceptor.pre_get_location(request, metadata) + transcoded_request = _BaseHypercomputeClusterRestTransport._BaseGetLocation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseHypercomputeClusterRestTransport._BaseGetLocation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.GetLocation", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "GetLocation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = HypercomputeClusterRestTransport._GetLocation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = locations_pb2.Location() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_get_location(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient.GetLocation", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "GetLocation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations( + _BaseHypercomputeClusterRestTransport._BaseListLocations, + HypercomputeClusterRestStub, + ): + def __hash__(self): + return hash("HypercomputeClusterRestTransport.ListLocations") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + 
r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options = _BaseHypercomputeClusterRestTransport._BaseListLocations._get_http_options() + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + transcoded_request = _BaseHypercomputeClusterRestTransport._BaseListLocations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseHypercomputeClusterRestTransport._BaseListLocations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.ListLocations", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "ListLocations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = HypercomputeClusterRestTransport._ListLocations._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_list_locations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient.ListLocations", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "ListLocations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation( + _BaseHypercomputeClusterRestTransport._BaseCancelOperation, + HypercomputeClusterRestStub, + ): + def __hash__(self): + return hash("HypercomputeClusterRestTransport.CancelOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + + http_options = _BaseHypercomputeClusterRestTransport._BaseCancelOperation._get_http_options() + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + transcoded_request = _BaseHypercomputeClusterRestTransport._BaseCancelOperation._get_transcoded_request( + http_options, request + ) + + body = _BaseHypercomputeClusterRestTransport._BaseCancelOperation._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseHypercomputeClusterRestTransport._BaseCancelOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.CancelOperation", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "CancelOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = HypercomputeClusterRestTransport._CancelOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation( + _BaseHypercomputeClusterRestTransport._BaseDeleteOperation, + HypercomputeClusterRestStub, + ): + def __hash__(self): + return hash("HypercomputeClusterRestTransport.DeleteOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + + http_options = _BaseHypercomputeClusterRestTransport._BaseDeleteOperation._get_http_options() + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + transcoded_request = _BaseHypercomputeClusterRestTransport._BaseDeleteOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseHypercomputeClusterRestTransport._BaseDeleteOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.DeleteOperation", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "DeleteOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = HypercomputeClusterRestTransport._DeleteOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation( + _BaseHypercomputeClusterRestTransport._BaseGetOperation, + HypercomputeClusterRestStub, + ): + def __hash__(self): + return hash("HypercomputeClusterRestTransport.GetOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options = _BaseHypercomputeClusterRestTransport._BaseGetOperation._get_http_options() + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + transcoded_request = _BaseHypercomputeClusterRestTransport._BaseGetOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseHypercomputeClusterRestTransport._BaseGetOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.GetOperation", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "GetOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = HypercomputeClusterRestTransport._GetOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_get_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient.GetOperation", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "GetOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations( + _BaseHypercomputeClusterRestTransport._BaseListOperations, + HypercomputeClusterRestStub, + ): + def __hash__(self): + return hash("HypercomputeClusterRestTransport.ListOperations") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> 
operations_pb2.ListOperationsResponse: + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. + """ + + http_options = _BaseHypercomputeClusterRestTransport._BaseListOperations._get_http_options() + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + transcoded_request = _BaseHypercomputeClusterRestTransport._BaseListOperations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseHypercomputeClusterRestTransport._BaseListOperations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.ListOperations", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "ListOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = HypercomputeClusterRestTransport._ListOperations._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
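+            # The operations mixin (Get/List/Cancel/DeleteOperation) is the
+            # surface the long-running-operation machinery polls while
+            # CreateCluster, UpdateCluster and DeleteCluster run. A hedged
+            # client-level sketch with example values:
+            #
+            #     op = client.create_cluster(request=create_request)  # Operation future
+            #     cluster = op.result(timeout=3600)  # polls GetOperation until done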
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_list_operations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient.ListOperations", + extra={ + "serviceName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "rpcName": "ListOperations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("HypercomputeClusterRestTransport",) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/rest_base.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/rest_base.py new file mode 100644 index 000000000000..1609e3dd5a53 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/services/hypercompute_cluster/transports/rest_base.py @@ -0,0 +1,506 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import json # type: ignore +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1, path_template +from google.cloud.location import locations_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import json_format + +from google.cloud.hypercomputecluster_v1.types import hypercompute_cluster + +from .base import DEFAULT_CLIENT_INFO, HypercomputeClusterTransport + + +class _BaseHypercomputeClusterRestTransport(HypercomputeClusterTransport): + """Base REST backend transport for HypercomputeCluster. + + Note: This class is not meant to be used directly. Use its sync and + async sub-classes instead. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "hypercomputecluster.googleapis.com", + credentials: Optional[Any] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. 
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to (default: 'hypercomputecluster.googleapis.com').
+            credentials (Optional[Any]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you are developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+            url_scheme: the protocol scheme for the API endpoint. Normally
+                "https", but for testing or local servers,
+                "http" can be specified.
+        """
+        # Run the base constructor
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+
+    class _BaseCreateCluster:
+        def __hash__(self):  # pragma: NO COVER
+            return NotImplementedError("__hash__ must be implemented.")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+            "clusterId": "",
+        }
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        @staticmethod
+        def _get_http_options():
+            http_options: List[Dict[str, str]] = [
+                {
+                    "method": "post",
+                    "uri": "/v1/{parent=projects/*/locations/*}/clusters",
+                    "body": "cluster",
+                },
+            ]
+            return http_options
+
+        @staticmethod
+        def _get_transcoded_request(http_options, request):
+            pb_request = hypercompute_cluster.CreateClusterRequest.pb(request)
+            transcoded_request = path_template.transcode(http_options, pb_request)
+            return transcoded_request
+
+        @staticmethod
+        def _get_request_body_json(transcoded_request):
+            # Jsonify the request body
+
+            body = json_format.MessageToJson(
+                transcoded_request["body"], use_integers_for_enums=True
+            )
+            return body
+
+        @staticmethod
+        def _get_query_params_json(transcoded_request):
+            query_params = json.loads(
+                json_format.MessageToJson(
+                    transcoded_request["query_params"],
+                    use_integers_for_enums=True,
+                )
+            )
+            query_params.update(
+                _BaseHypercomputeClusterRestTransport._BaseCreateCluster._get_unset_required_fields(
+                    query_params
+                )
+            )
+
+            query_params["$alt"] = "json;enum-encoding=int"
+            return query_params
+
+    class _BaseDeleteCluster:
+        def __hash__(self):  # pragma: NO COVER
+            return NotImplementedError("__hash__ must be implemented.")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        @staticmethod
+        def _get_http_options():
+            http_options: List[Dict[str, str]] = [
+                {
+                    "method": "delete",
+                    "uri": "/v1/{name=projects/*/locations/*/clusters/*}",
+                },
+            ]
+            return http_options
+
+        @staticmethod
+        def _get_transcoded_request(http_options, request):
+            pb_request =
hypercompute_cluster.DeleteClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseHypercomputeClusterRestTransport._BaseDeleteCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/clusters/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = hypercompute_cluster.GetClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseHypercomputeClusterRestTransport._BaseGetCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListClusters: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/clusters", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = hypercompute_cluster.ListClustersRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseHypercomputeClusterRestTransport._BaseListClusters._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1/{cluster.name=projects/*/locations/*/clusters/*}", + "body": "cluster", 
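+                    # This google.api.http binding splits UpdateClusterRequest
+                    # three ways: request.cluster.name fills the URI template,
+                    # request.cluster is serialized as the PATCH body, and the
+                    # remaining fields (notably update_mask) travel as query
+                    # parameters, e.g. with illustrative values:
+                    # PATCH /v1/projects/p/locations/l/clusters/c?updateMask=description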
+ }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = hypercompute_cluster.UpdateClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseHypercomputeClusterRestTransport._BaseUpdateCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetLocation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseListLocations: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*}/locations", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseCancelOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + body = json.dumps(transcoded_request["body"]) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseDeleteOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + ] + return http_options + + 
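+        # path_template.transcode() (used below) matches the request against
+        # the URI template above and returns a dict shaped roughly like this,
+        # with illustrative values:
+        #
+        #     {"method": "delete",
+        #      "uri": "/v1/projects/p/locations/l/operations/op-123",
+        #      "query_params": <fields not bound into the URI>}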
@staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseGetOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseListOperations: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + +__all__ = ("_BaseHypercomputeClusterRestTransport",) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/types/__init__.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/types/__init__.py new file mode 100644 index 000000000000..4abfff6d7810 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/types/__init__.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .hypercompute_cluster import ( + BootDisk, + BucketReference, + Cluster, + ComputeInstance, + ComputeInstanceSlurmNodeSet, + ComputeResource, + ComputeResourceConfig, + CreateClusterRequest, + DeleteClusterRequest, + ExistingBucketConfig, + ExistingFilestoreConfig, + ExistingLustreConfig, + ExistingNetworkConfig, + FileShareConfig, + FilestoreReference, + GcsAutoclassConfig, + GcsHierarchicalNamespaceConfig, + GetClusterRequest, + ListClustersRequest, + ListClustersResponse, + LustreReference, + NetworkReference, + NetworkResource, + NetworkResourceConfig, + NewBucketConfig, + NewFilestoreConfig, + NewFlexStartInstancesConfig, + NewLustreConfig, + NewNetworkConfig, + NewOnDemandInstancesConfig, + NewReservedInstancesConfig, + NewSpotInstancesConfig, + Orchestrator, + SlurmLoginNodes, + SlurmNodeSet, + SlurmOrchestrator, + SlurmPartition, + StorageConfig, + StorageResource, + StorageResourceConfig, + UpdateClusterRequest, +) +from .operation_metadata import ( + CheckClusterHealth, + CreateFilestoreInstance, + CreateLoginNode, + CreateLustreInstance, + CreateNetwork, + CreateNodeset, + CreateOrchestrator, + CreatePartition, + CreatePrivateServiceAccess, + CreateStorageBucket, + DeleteFilestoreInstance, + DeleteLoginNode, + DeleteLustreInstance, + DeleteNetwork, + DeleteNodeset, + DeleteOrchestrator, + DeletePartition, + DeletePrivateServiceAccess, + DeleteStorageBucket, + OperationMetadata, + OperationProgress, + OperationStep, + UpdateLoginNode, + UpdateNodeset, + UpdateOrchestrator, + UpdatePartition, +) + +__all__ = ( + "BootDisk", + "BucketReference", + "Cluster", + "ComputeInstance", + "ComputeInstanceSlurmNodeSet", + "ComputeResource", + "ComputeResourceConfig", + "CreateClusterRequest", + "DeleteClusterRequest", + "ExistingBucketConfig", + "ExistingFilestoreConfig", + "ExistingLustreConfig", + "ExistingNetworkConfig", + "FileShareConfig", + "FilestoreReference", + "GcsAutoclassConfig", + "GcsHierarchicalNamespaceConfig", + "GetClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "LustreReference", + "NetworkReference", + "NetworkResource", + "NetworkResourceConfig", + "NewBucketConfig", + "NewFilestoreConfig", + "NewFlexStartInstancesConfig", + "NewLustreConfig", + "NewNetworkConfig", + "NewOnDemandInstancesConfig", + "NewReservedInstancesConfig", + "NewSpotInstancesConfig", + "Orchestrator", + "SlurmLoginNodes", + "SlurmNodeSet", + "SlurmOrchestrator", + "SlurmPartition", + "StorageConfig", + "StorageResource", + "StorageResourceConfig", + "UpdateClusterRequest", + "CheckClusterHealth", + "CreateFilestoreInstance", + "CreateLoginNode", + "CreateLustreInstance", + "CreateNetwork", + "CreateNodeset", + "CreateOrchestrator", + "CreatePartition", + "CreatePrivateServiceAccess", + "CreateStorageBucket", + "DeleteFilestoreInstance", + "DeleteLoginNode", + "DeleteLustreInstance", + "DeleteNetwork", + "DeleteNodeset", + "DeleteOrchestrator", + "DeletePartition", + "DeletePrivateServiceAccess", + "DeleteStorageBucket", + "OperationMetadata", + "OperationProgress", + "OperationStep", + "UpdateLoginNode", + "UpdateNodeset", + "UpdateOrchestrator", + "UpdatePartition", +) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/types/hypercompute_cluster.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/types/hypercompute_cluster.py new file mode 100644 index 000000000000..cf9101df19cb --- /dev/null +++ 
b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/types/hypercompute_cluster.py
@@ -0,0 +1,1704 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import google.protobuf.duration_pb2 as duration_pb2  # type: ignore
+import google.protobuf.field_mask_pb2 as field_mask_pb2  # type: ignore
+import google.protobuf.timestamp_pb2 as timestamp_pb2  # type: ignore
+import proto  # type: ignore
+
+__protobuf__ = proto.module(
+    package="google.cloud.hypercomputecluster.v1",
+    manifest={
+        "Cluster",
+        "ListClustersRequest",
+        "ListClustersResponse",
+        "GetClusterRequest",
+        "CreateClusterRequest",
+        "UpdateClusterRequest",
+        "DeleteClusterRequest",
+        "NetworkResource",
+        "NetworkReference",
+        "NetworkResourceConfig",
+        "NewNetworkConfig",
+        "ExistingNetworkConfig",
+        "StorageResource",
+        "FilestoreReference",
+        "BucketReference",
+        "LustreReference",
+        "StorageResourceConfig",
+        "NewFilestoreConfig",
+        "FileShareConfig",
+        "ExistingFilestoreConfig",
+        "NewBucketConfig",
+        "GcsAutoclassConfig",
+        "GcsHierarchicalNamespaceConfig",
+        "ExistingBucketConfig",
+        "NewLustreConfig",
+        "ExistingLustreConfig",
+        "ComputeResource",
+        "ComputeResourceConfig",
+        "NewOnDemandInstancesConfig",
+        "NewSpotInstancesConfig",
+        "NewReservedInstancesConfig",
+        "NewFlexStartInstancesConfig",
+        "BootDisk",
+        "Orchestrator",
+        "SlurmOrchestrator",
+        "SlurmNodeSet",
+        "ComputeInstanceSlurmNodeSet",
+        "SlurmPartition",
+        "SlurmLoginNodes",
+        "StorageConfig",
+        "ComputeInstance",
+    },
+)
+
+
+class Cluster(proto.Message):
+    r"""A collection of virtual machines and connected resources
+    forming a high-performance computing cluster capable of running
+    large-scale, tightly coupled workloads. A cluster combines a set
+    of compute resources that perform computations, storage resources
+    that contain inputs and store outputs, an orchestrator that is
+    responsible for assigning jobs to compute resources, and network
+    resources that connect everything together.
+
+    Attributes:
+        name (str):
+            Identifier. `Relative resource
+            name `__ of the cluster, in the
+            format
+            ``projects/{project}/locations/{location}/clusters/{cluster}``.
+        description (str):
+            Optional. User-provided description of the
+            cluster.
+        labels (MutableMapping[str, str]):
+            Optional.
+            `Labels `__
+            applied to the cluster. Labels can be used to organize
+            clusters and to filter them in queries.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time that the cluster was
+            originally created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time that the cluster was most
+            recently updated.
+        reconciling (bool):
+            Output only. Indicates whether changes to the cluster are
+            currently in flight. If this is ``true``, then the current
+            state might not match the cluster's intended state.
+ network_resources (MutableMapping[str, google.cloud.hypercomputecluster_v1.types.NetworkResource]): + Optional. Network resources available to the cluster. Must + contain at most one value. Keys specify the ID of the + network resource by which it can be referenced elsewhere, + and must conform to + `RFC-1034 `__ + (lower-case, alphanumeric, and at most 63 characters). + storage_resources (MutableMapping[str, google.cloud.hypercomputecluster_v1.types.StorageResource]): + Optional. Storage resources available to the cluster. Keys + specify the ID of the storage resource by which it can be + referenced elsewhere, and must conform to + `RFC-1034 `__ + (lower-case, alphanumeric, and at most 63 characters). + compute_resources (MutableMapping[str, google.cloud.hypercomputecluster_v1.types.ComputeResource]): + Optional. Compute resources available to the cluster. Keys + specify the ID of the compute resource by which it can be + referenced elsewhere, and must conform to + `RFC-1034 `__ + (lower-case, alphanumeric, and at most 63 characters). + orchestrator (google.cloud.hypercomputecluster_v1.types.Orchestrator): + Optional. Orchestrator that is responsible + for scheduling and running jobs on the cluster. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + description: str = proto.Field( + proto.STRING, + number=9, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + reconciling: bool = proto.Field( + proto.BOOL, + number=10, + ) + network_resources: MutableMapping[str, "NetworkResource"] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=11, + message="NetworkResource", + ) + storage_resources: MutableMapping[str, "StorageResource"] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=12, + message="StorageResource", + ) + compute_resources: MutableMapping[str, "ComputeResource"] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=13, + message="ComputeResource", + ) + orchestrator: "Orchestrator" = proto.Field( + proto.MESSAGE, + number=8, + message="Orchestrator", + ) + + +class ListClustersRequest(proto.Message): + r"""Request message for + [ListClusters][google.cloud.hypercomputecluster.v1.HypercomputeCluster.ListClusters]. + + Attributes: + parent (str): + Required. Parent location of the clusters to list, in the + format ``projects/{project}/locations/{location}``. + page_size (int): + Optional. Maximum number of clusters to + return. The service may return fewer than this + value. + page_token (str): + Optional. A page token received from a previous + ``ListClusters`` call. Provide this to retrieve the + subsequent page. When paginating, all other parameters + provided to ``ListClusters`` must match the call that + provided the page token. + filter (str): + Optional. `Filter `__ to apply + to the returned results. + order_by (str): + Optional. How to order the resulting clusters. Must be one + of the following strings: + + - ``name`` + - ``name desc`` + - ``create_time`` + - ``create_time desc`` + + If not specified, clusters will be returned in an arbitrary + order. 
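+
+    Example (an illustrative sketch rather than generated reference
+    code; ``my-project`` and ``us-central1`` are placeholder values,
+    and application default credentials are assumed)::
+
+        from google.cloud import hypercomputecluster_v1
+
+        client = hypercomputecluster_v1.HypercomputeClusterClient()
+        request = hypercomputecluster_v1.ListClustersRequest(
+            parent="projects/my-project/locations/us-central1",
+            page_size=25,
+            order_by="create_time desc",
+        )
+        # The returned pager resolves ``page_token`` continuation
+        # automatically while iterating.
+        for cluster in client.list_clusters(request=request):
+            print(cluster.name)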
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + order_by: str = proto.Field( + proto.STRING, + number=5, + ) + + +class ListClustersResponse(proto.Message): + r"""Response message for + [ListClusters][google.cloud.hypercomputecluster.v1.HypercomputeCluster.ListClusters]. + + Attributes: + clusters (MutableSequence[google.cloud.hypercomputecluster_v1.types.Cluster]): + Clusters in the specified location. + next_page_token (str): + A token that can be sent as ``page_token`` to retrieve the + next page. If this field is absent, there are no subsequent + pages. + unreachable (MutableSequence[str]): + Locations that could not be reached. + """ + + @property + def raw_page(self): + return self + + clusters: MutableSequence["Cluster"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Cluster", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + unreachable: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +class GetClusterRequest(proto.Message): + r"""Request message for + [GetCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.GetCluster]. + + Attributes: + name (str): + Required. Name of the cluster to retrieve, in the format + ``projects/{project}/locations/{location}/clusters/{cluster}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateClusterRequest(proto.Message): + r"""Request message for + [CreateCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.CreateCluster]. + + Attributes: + parent (str): + Required. Parent location in which the cluster should be + created, in the format + ``projects/{project}/locations/{location}``. + cluster_id (str): + Required. ID of the cluster to create. Must conform to + `RFC-1034 `__ + (lower-case, alphanumeric, and at most 63 characters). + cluster (google.cloud.hypercomputecluster_v1.types.Cluster): + Required. Cluster to create. + request_id (str): + Optional. A unique identifier for this request. A random + UUID is recommended. This request is idempotent if and only + if ``request_id`` is provided. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + cluster_id: str = proto.Field( + proto.STRING, + number=2, + ) + cluster: "Cluster" = proto.Field( + proto.MESSAGE, + number=3, + message="Cluster", + ) + request_id: str = proto.Field( + proto.STRING, + number=4, + ) + + +class UpdateClusterRequest(proto.Message): + r"""Request message for + [UpdateCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.UpdateCluster]. + + Attributes: + cluster (google.cloud.hypercomputecluster_v1.types.Cluster): + Required. Cluster to update. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields in the + cluster to update. All paths must be specified + explicitly - wildcards are not supported. At + least one path must be provided. + request_id (str): + Optional. A unique identifier for this request. A random + UUID is recommended. This request is idempotent if and only + if ``request_id`` is provided. 
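+
+    Example (an illustrative sketch, not generated reference code; the
+    cluster name is a placeholder, and the update is assumed to return
+    a long-running operation, as the other mutations in this service
+    do)::
+
+        import uuid
+
+        from google.cloud import hypercomputecluster_v1
+        from google.protobuf import field_mask_pb2
+
+        client = hypercomputecluster_v1.HypercomputeClusterClient()
+        request = hypercomputecluster_v1.UpdateClusterRequest(
+            cluster=hypercomputecluster_v1.Cluster(
+                name="projects/my-project/locations/us-central1/clusters/demo",
+                description="Nightly training cluster",
+            ),
+            # Only the paths listed here are written; wildcards are
+            # not supported.
+            update_mask=field_mask_pb2.FieldMask(paths=["description"]),
+            # A random UUID makes the request idempotent and therefore
+            # safely retryable.
+            request_id=str(uuid.uuid4()),
+        )
+        operation = client.update_cluster(request=request)
+        updated = operation.result()  # blocks until the LRO finishes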
+ """ + + cluster: "Cluster" = proto.Field( + proto.MESSAGE, + number=2, + message="Cluster", + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + request_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class DeleteClusterRequest(proto.Message): + r"""Request message for + [DeleteCluster][google.cloud.hypercomputecluster.v1.HypercomputeCluster.DeleteCluster]. + + Attributes: + name (str): + Required. Name of the cluster to delete, in the format + ``projects/{project}/locations/{location}/clusters/{cluster}``. + request_id (str): + Optional. A unique identifier for this request. A random + UUID is recommended. This request is idempotent if and only + if ``request_id`` is provided. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + request_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +class NetworkResource(proto.Message): + r"""A resource representing a network that connects the various + components of a cluster together. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + network (google.cloud.hypercomputecluster_v1.types.NetworkReference): + Reference to a network in Google Compute + Engine. + + This field is a member of `oneof`_ ``reference``. + config (google.cloud.hypercomputecluster_v1.types.NetworkResourceConfig): + Immutable. Configuration for this network + resource, which describes how it should be + created or imported. This field only controls + how the network resource is initially created or + imported. Subsequent changes to the network + resource should be made via the resource's API + and will not be reflected in the configuration. + """ + + network: "NetworkReference" = proto.Field( + proto.MESSAGE, + number=3, + oneof="reference", + message="NetworkReference", + ) + config: "NetworkResourceConfig" = proto.Field( + proto.MESSAGE, + number=2, + message="NetworkResourceConfig", + ) + + +class NetworkReference(proto.Message): + r"""A reference to a `VPC + network `__ in Google Compute + Engine. + + Attributes: + network (str): + Output only. Name of the network, in the format + ``projects/{project}/global/networks/{network}``. + subnetwork (str): + Output only. Name of the particular subnetwork being used by + the cluster, in the format + ``projects/{project}/regions/{region}/subnetworks/{subnetwork}``. + """ + + network: str = proto.Field( + proto.STRING, + number=1, + ) + subnetwork: str = proto.Field( + proto.STRING, + number=2, + ) + + +class NetworkResourceConfig(proto.Message): + r"""Describes how a network resource should be initialized. Each + network resource can either be imported from an existing Google + Cloud resource or initialized when the cluster is created. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + new_network (google.cloud.hypercomputecluster_v1.types.NewNetworkConfig): + Optional. Immutable. If set, indicates that a + new network should be created. + + This field is a member of `oneof`_ ``config``. + existing_network (google.cloud.hypercomputecluster_v1.types.ExistingNetworkConfig): + Optional. Immutable. 
If set, indicates that + an existing network should be imported. + + This field is a member of `oneof`_ ``config``. + """ + + new_network: "NewNetworkConfig" = proto.Field( + proto.MESSAGE, + number=3, + oneof="config", + message="NewNetworkConfig", + ) + existing_network: "ExistingNetworkConfig" = proto.Field( + proto.MESSAGE, + number=4, + oneof="config", + message="ExistingNetworkConfig", + ) + + +class NewNetworkConfig(proto.Message): + r"""When set in a + [NetworkResourceConfig][google.cloud.hypercomputecluster.v1.NetworkResourceConfig], + indicates that a new network should be created. + + Attributes: + network (str): + Required. Immutable. Name of the network to create, in the + format ``projects/{project}/global/networks/{network}``. + description (str): + Optional. Immutable. Description of the + network. Maximum of 2048 characters. + """ + + network: str = proto.Field( + proto.STRING, + number=1, + ) + description: str = proto.Field( + proto.STRING, + number=2, + ) + + +class ExistingNetworkConfig(proto.Message): + r"""When set in a + [NetworkResourceConfig][google.cloud.hypercomputecluster.v1.NetworkResourceConfig], + indicates that an existing network should be imported. + + Attributes: + network (str): + Required. Immutable. Name of the network to import, in the + format ``projects/{project}/global/networks/{network}``. + subnetwork (str): + Required. Immutable. Particular subnetwork to use, in the + format + ``projects/{project}/regions/{region}/subnetworks/{subnetwork}``. + """ + + network: str = proto.Field( + proto.STRING, + number=1, + ) + subnetwork: str = proto.Field( + proto.STRING, + number=2, + ) + + +class StorageResource(proto.Message): + r"""A resource representing a form of persistent storage that is + accessible to compute resources in the cluster. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + filestore (google.cloud.hypercomputecluster_v1.types.FilestoreReference): + Reference to a Filestore instance. Populated + if and only if the storage resource was + configured to use Filestore. + + This field is a member of `oneof`_ ``reference``. + bucket (google.cloud.hypercomputecluster_v1.types.BucketReference): + Reference to a Google Cloud Storage bucket. + Populated if and only if the storage resource + was configured to use Google Cloud Storage. + + This field is a member of `oneof`_ ``reference``. + lustre (google.cloud.hypercomputecluster_v1.types.LustreReference): + Reference to a Managed Lustre instance. + Populated if and only if the storage resource + was configured to use Managed Lustre. + + This field is a member of `oneof`_ ``reference``. + config (google.cloud.hypercomputecluster_v1.types.StorageResourceConfig): + Required. Immutable. Configuration for this + storage resource, which describes how it should + be created or imported. This field only controls + how the storage resource is initially created or + imported. Subsequent changes to the storage + resource should be made via the resource's API + and will not be reflected in the configuration. 
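+
+    Example (an illustrative sketch; one way to see which ``reference``
+    member is populated is to ask the underlying protobuf message that
+    proto-plus exposes through ``pb()``)::
+
+        from google.cloud import hypercomputecluster_v1
+
+        def storage_name(res: hypercomputecluster_v1.StorageResource) -> str:
+            # ``WhichOneof`` names the populated member of the
+            # ``reference`` oneof, or returns None if none is set yet.
+            kind = hypercomputecluster_v1.StorageResource.pb(res).WhichOneof(
+                "reference"
+            )
+            if kind == "filestore":
+                return res.filestore.filestore
+            if kind == "bucket":
+                return res.bucket.bucket
+            if kind == "lustre":
+                return res.lustre.lustre
+            return "(not yet provisioned)"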
+ """ + + filestore: "FilestoreReference" = proto.Field( + proto.MESSAGE, + number=1, + oneof="reference", + message="FilestoreReference", + ) + bucket: "BucketReference" = proto.Field( + proto.MESSAGE, + number=2, + oneof="reference", + message="BucketReference", + ) + lustre: "LustreReference" = proto.Field( + proto.MESSAGE, + number=3, + oneof="reference", + message="LustreReference", + ) + config: "StorageResourceConfig" = proto.Field( + proto.MESSAGE, + number=4, + message="StorageResourceConfig", + ) + + +class FilestoreReference(proto.Message): + r"""A reference to a `Filestore `__ + instance. + + Attributes: + filestore (str): + Output only. Name of the Filestore instance, in the format + ``projects/{project}/locations/{location}/instances/{instance}`` + """ + + filestore: str = proto.Field( + proto.STRING, + number=1, + ) + + +class BucketReference(proto.Message): + r"""A reference to a `Google Cloud + Storage `__ bucket. + + Attributes: + bucket (str): + Output only. Name of the bucket. + """ + + bucket: str = proto.Field( + proto.STRING, + number=1, + ) + + +class LustreReference(proto.Message): + r"""A reference to a `Managed + Lustre `__ + instance. + + Attributes: + lustre (str): + Output only. Name of the Managed Lustre instance, in the + format + ``projects/{project}/locations/{location}/instances/{instance}`` + """ + + lustre: str = proto.Field( + proto.STRING, + number=1, + ) + + +class StorageResourceConfig(proto.Message): + r"""Describes how a storage resource should be initialized. Each + storage resource can either be imported from an existing Google + Cloud resource or initialized when the cluster is created. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + new_filestore (google.cloud.hypercomputecluster_v1.types.NewFilestoreConfig): + Optional. Immutable. If set, indicates that a + new Filestore instance should be created. + + This field is a member of `oneof`_ ``config``. + existing_filestore (google.cloud.hypercomputecluster_v1.types.ExistingFilestoreConfig): + Optional. Immutable. If set, indicates that + an existing Filestore instance should be + imported. + + This field is a member of `oneof`_ ``config``. + new_bucket (google.cloud.hypercomputecluster_v1.types.NewBucketConfig): + Optional. Immutable. If set, indicates that a + new Cloud Storage bucket should be created. + + This field is a member of `oneof`_ ``config``. + existing_bucket (google.cloud.hypercomputecluster_v1.types.ExistingBucketConfig): + Optional. Immutable. If set, indicates that + an existing Cloud Storage bucket should be + imported. + + This field is a member of `oneof`_ ``config``. + new_lustre (google.cloud.hypercomputecluster_v1.types.NewLustreConfig): + Optional. Immutable. If set, indicates that a + new Managed Lustre instance should be created. + + This field is a member of `oneof`_ ``config``. + existing_lustre (google.cloud.hypercomputecluster_v1.types.ExistingLustreConfig): + Optional. Immutable. If set, indicates that + an existing Managed Lustre instance should be + imported. + + This field is a member of `oneof`_ ``config``. 
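+
+    Example (an illustrative sketch; the bucket and Filestore instance
+    names are placeholders)::
+
+        from google.cloud import hypercomputecluster_v1
+
+        # Ask the service to create a new Cloud Storage bucket...
+        create_bucket = hypercomputecluster_v1.StorageResourceConfig(
+            new_bucket=hypercomputecluster_v1.NewBucketConfig(
+                bucket="my-scratch-bucket",
+            ),
+        )
+        # ...or import an existing Filestore instance instead. Setting
+        # one member of the ``config`` oneof clears any other member.
+        import_filestore = hypercomputecluster_v1.StorageResourceConfig(
+            existing_filestore=hypercomputecluster_v1.ExistingFilestoreConfig(
+                filestore="projects/my-project/locations/us-central1-a/instances/shared",
+            ),
+        )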
+ """ + + new_filestore: "NewFilestoreConfig" = proto.Field( + proto.MESSAGE, + number=1, + oneof="config", + message="NewFilestoreConfig", + ) + existing_filestore: "ExistingFilestoreConfig" = proto.Field( + proto.MESSAGE, + number=2, + oneof="config", + message="ExistingFilestoreConfig", + ) + new_bucket: "NewBucketConfig" = proto.Field( + proto.MESSAGE, + number=3, + oneof="config", + message="NewBucketConfig", + ) + existing_bucket: "ExistingBucketConfig" = proto.Field( + proto.MESSAGE, + number=4, + oneof="config", + message="ExistingBucketConfig", + ) + new_lustre: "NewLustreConfig" = proto.Field( + proto.MESSAGE, + number=5, + oneof="config", + message="NewLustreConfig", + ) + existing_lustre: "ExistingLustreConfig" = proto.Field( + proto.MESSAGE, + number=6, + oneof="config", + message="ExistingLustreConfig", + ) + + +class NewFilestoreConfig(proto.Message): + r"""When set in a + [StorageResourceConfig][google.cloud.hypercomputecluster.v1.StorageResourceConfig], + indicates that a new + `Filestore `__ instance should + be created. + + Attributes: + filestore (str): + Required. Immutable. Name of the Filestore instance to + create, in the format + ``projects/{project}/locations/{location}/instances/{instance}`` + description (str): + Optional. Immutable. Description of the + instance. Maximum of 2048 characters. + file_shares (MutableSequence[google.cloud.hypercomputecluster_v1.types.FileShareConfig]): + Required. Immutable. File system shares on + the instance. Exactly one file share must be + specified. + tier (google.cloud.hypercomputecluster_v1.types.NewFilestoreConfig.Tier): + Required. Immutable. Service tier to use for + the instance. + protocol (google.cloud.hypercomputecluster_v1.types.NewFilestoreConfig.Protocol): + Optional. Immutable. Access protocol to use + for all file shares in the instance. Defaults to + NFS V3 if not set. + """ + + class Tier(proto.Enum): + r"""Available `service + tiers `__ for + Filestore instances. + + Values: + TIER_UNSPECIFIED (0): + Not set. + ZONAL (4): + Offers expanded capacity and performance + scaling capabilities suitable for + high-performance computing application + requirements. + REGIONAL (6): + Offers features and availability needed for + mission-critical, high-performance computing + workloads. + """ + + TIER_UNSPECIFIED = 0 + ZONAL = 4 + REGIONAL = 6 + + class Protocol(proto.Enum): + r"""File access protocol for Filestore instances. + + Values: + PROTOCOL_UNSPECIFIED (0): + Not set. + NFSV3 (1): + NFS 3.0. + NFSV41 (2): + NFS 4.1. + """ + + PROTOCOL_UNSPECIFIED = 0 + NFSV3 = 1 + NFSV41 = 2 + + filestore: str = proto.Field( + proto.STRING, + number=1, + ) + description: str = proto.Field( + proto.STRING, + number=4, + ) + file_shares: MutableSequence["FileShareConfig"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="FileShareConfig", + ) + tier: Tier = proto.Field( + proto.ENUM, + number=3, + enum=Tier, + ) + protocol: Protocol = proto.Field( + proto.ENUM, + number=5, + enum=Protocol, + ) + + +class FileShareConfig(proto.Message): + r"""Message describing filestore configuration + + Attributes: + capacity_gb (int): + Required. Size of the filestore in GB. Must + be between 1024 and 102400, and must meet + scalability requirements described at + https://cloud.google.com/filestore/docs/service-tiers. + file_share (str): + Required. 
Filestore share location + """ + + capacity_gb: int = proto.Field( + proto.INT64, + number=1, + ) + file_share: str = proto.Field( + proto.STRING, + number=2, + ) + + +class ExistingFilestoreConfig(proto.Message): + r"""When set in a + [StorageResourceConfig][google.cloud.hypercomputecluster.v1.StorageResourceConfig], + indicates that an existing + `Filestore `__ instance should + be imported. + + Attributes: + filestore (str): + Required. Immutable. Name of the Filestore instance to + import, in the format + ``projects/{project}/locations/{location}/instances/{instance}`` + """ + + filestore: str = proto.Field( + proto.STRING, + number=1, + ) + + +class NewBucketConfig(proto.Message): + r"""When set in a + [StorageResourceConfig][google.cloud.hypercomputecluster.v1.StorageResourceConfig], + indicates that a new `Google Cloud + Storage `__ bucket should be + created. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + autoclass (google.cloud.hypercomputecluster_v1.types.GcsAutoclassConfig): + Optional. Immutable. If set, indicates that the bucket + should use + `Autoclass `__. + + This field is a member of `oneof`_ ``option``. + storage_class (google.cloud.hypercomputecluster_v1.types.NewBucketConfig.StorageClass): + Optional. Immutable. If set, uses the + provided storage class as the bucket's default + storage class. + + This field is a member of `oneof`_ ``option``. + bucket (str): + Required. Immutable. Name of the Cloud + Storage bucket to create. + hierarchical_namespace (google.cloud.hypercomputecluster_v1.types.GcsHierarchicalNamespaceConfig): + Optional. Immutable. If set, indicates that the bucket + should use `hierarchical + namespaces `__. + """ + + class StorageClass(proto.Enum): + r"""`Storage + class `__ for + a Cloud Storage bucket. + + Values: + STORAGE_CLASS_UNSPECIFIED (0): + Not set. + STANDARD (1): + Best for data that is frequently accessed. + NEARLINE (2): + Low-cost storage for data that is accessed + less frequently. + COLDLINE (3): + Very low-cost storage for infrequently + accessed data. + ARCHIVE (4): + Lowest-cost storage for data archiving, + online backup, and disaster recovery. + """ + + STORAGE_CLASS_UNSPECIFIED = 0 + STANDARD = 1 + NEARLINE = 2 + COLDLINE = 3 + ARCHIVE = 4 + + autoclass: "GcsAutoclassConfig" = proto.Field( + proto.MESSAGE, + number=2, + oneof="option", + message="GcsAutoclassConfig", + ) + storage_class: StorageClass = proto.Field( + proto.ENUM, + number=3, + oneof="option", + enum=StorageClass, + ) + bucket: str = proto.Field( + proto.STRING, + number=1, + ) + hierarchical_namespace: "GcsHierarchicalNamespaceConfig" = proto.Field( + proto.MESSAGE, + number=4, + message="GcsHierarchicalNamespaceConfig", + ) + + +class GcsAutoclassConfig(proto.Message): + r"""Message describing Google Cloud Storage autoclass + configuration + + Attributes: + enabled (bool): + Required. Enables Auto-class feature. + terminal_storage_class (google.cloud.hypercomputecluster_v1.types.GcsAutoclassConfig.TerminalStorageClass): + Optional. 
Terminal storage class of the + autoclass bucket + """ + + class TerminalStorageClass(proto.Enum): + r"""Terminal storage class types of the autoclass bucket + + Values: + TERMINAL_STORAGE_CLASS_UNSPECIFIED (0): + Unspecified terminal storage class + """ + + TERMINAL_STORAGE_CLASS_UNSPECIFIED = 0 + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + terminal_storage_class: TerminalStorageClass = proto.Field( + proto.ENUM, + number=2, + enum=TerminalStorageClass, + ) + + +class GcsHierarchicalNamespaceConfig(proto.Message): + r"""Message describing Google Cloud Storage hierarchical + namespace configuration + + Attributes: + enabled (bool): + Required. Enables hierarchical namespace + setup for the bucket. + """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + + +class ExistingBucketConfig(proto.Message): + r"""When set in a + [StorageResourceConfig][google.cloud.hypercomputecluster.v1.StorageResourceConfig], + indicates that an existing `Google Cloud + Storage `__ bucket should be + imported. + + Attributes: + bucket (str): + Required. Immutable. Name of the Cloud + Storage bucket to import. + """ + + bucket: str = proto.Field( + proto.STRING, + number=1, + ) + + +class NewLustreConfig(proto.Message): + r"""When set in a + [StorageResourceConfig][google.cloud.hypercomputecluster.v1.StorageResourceConfig], + indicates that a new `Managed + Lustre `__ + instance should be created. + + Attributes: + lustre (str): + Required. Immutable. Name of the Managed Lustre instance to + create, in the format + ``projects/{project}/locations/{location}/instances/{instance}`` + description (str): + Optional. Immutable. Description of the + Managed Lustre instance. Maximum of 2048 + characters. + filesystem (str): + Required. Immutable. Filesystem name for this + instance. This name is used by client-side + tools, including when mounting the instance. + Must be 8 characters or less and can only + contain letters and numbers. + capacity_gb (int): + Required. Immutable. Storage capacity of the + instance in gibibytes (GiB). Allowed values are + between 18000 and 7632000. + """ + + lustre: str = proto.Field( + proto.STRING, + number=1, + ) + description: str = proto.Field( + proto.STRING, + number=2, + ) + filesystem: str = proto.Field( + proto.STRING, + number=3, + ) + capacity_gb: int = proto.Field( + proto.INT64, + number=4, + ) + + +class ExistingLustreConfig(proto.Message): + r"""When set in a + [StorageResourceConfig][google.cloud.hypercomputecluster.v1.StorageResourceConfig], + indicates that an existing `Managed + Lustre `__ + instance should be imported. + + Attributes: + lustre (str): + Required. Immutable. Name of the Managed Lustre instance to + import, in the format + ``projects/{project}/locations/{location}/instances/{instance}`` + """ + + lustre: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ComputeResource(proto.Message): + r"""A resource defining how virtual machines and accelerators + should be provisioned for the cluster. + + Attributes: + config (google.cloud.hypercomputecluster_v1.types.ComputeResourceConfig): + Required. Immutable. Configuration for this + compute resource, which describes how it should + be created at runtime. + """ + + config: "ComputeResourceConfig" = proto.Field( + proto.MESSAGE, + number=6, + message="ComputeResourceConfig", + ) + + +class ComputeResourceConfig(proto.Message): + r"""Describes how a compute resource should be created at + runtime. + + This message has `oneof`_ fields (mutually exclusive fields). 
+ For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + new_on_demand_instances (google.cloud.hypercomputecluster_v1.types.NewOnDemandInstancesConfig): + Optional. Immutable. If set, indicates that + this resource should use on-demand VMs. + + This field is a member of `oneof`_ ``config``. + new_spot_instances (google.cloud.hypercomputecluster_v1.types.NewSpotInstancesConfig): + Optional. Immutable. If set, indicates that + this resource should use spot VMs. + + This field is a member of `oneof`_ ``config``. + new_reserved_instances (google.cloud.hypercomputecluster_v1.types.NewReservedInstancesConfig): + Optional. Immutable. If set, indicates that + this resource should use reserved VMs. + + This field is a member of `oneof`_ ``config``. + new_flex_start_instances (google.cloud.hypercomputecluster_v1.types.NewFlexStartInstancesConfig): + Optional. Immutable. If set, indicates that + this resource should use flex-start VMs. + + This field is a member of `oneof`_ ``config``. + """ + + new_on_demand_instances: "NewOnDemandInstancesConfig" = proto.Field( + proto.MESSAGE, + number=1, + oneof="config", + message="NewOnDemandInstancesConfig", + ) + new_spot_instances: "NewSpotInstancesConfig" = proto.Field( + proto.MESSAGE, + number=2, + oneof="config", + message="NewSpotInstancesConfig", + ) + new_reserved_instances: "NewReservedInstancesConfig" = proto.Field( + proto.MESSAGE, + number=3, + oneof="config", + message="NewReservedInstancesConfig", + ) + new_flex_start_instances: "NewFlexStartInstancesConfig" = proto.Field( + proto.MESSAGE, + number=5, + oneof="config", + message="NewFlexStartInstancesConfig", + ) + + +class NewOnDemandInstancesConfig(proto.Message): + r"""When set in a + [ComputeResourceConfig][google.cloud.hypercomputecluster.v1.ComputeResourceConfig], + indicates that on-demand (i.e., using the standard provisioning + model) VM instances should be created. + + Attributes: + zone (str): + Required. Immutable. Name of the zone in which VM instances + should run, e.g., ``us-central1-a``. Must be in the same + region as the cluster, and must match the zone of any other + resources specified in the cluster. + machine_type (str): + Required. Immutable. Name of the Compute Engine `machine + type `__ + to use, e.g. ``n2-standard-2``. + """ + + zone: str = proto.Field( + proto.STRING, + number=1, + ) + machine_type: str = proto.Field( + proto.STRING, + number=2, + ) + + +class NewSpotInstancesConfig(proto.Message): + r"""When set in a + [ComputeResourceConfig][google.cloud.hypercomputecluster.v1.ComputeResourceConfig], + indicates that `spot + VM `__ + instances should be created. + + Attributes: + zone (str): + Required. Immutable. Name of the zone in which VM instances + should run, e.g., ``us-central1-a``. Must be in the same + region as the cluster, and must match the zone of any other + resources specified in the cluster. + machine_type (str): + Required. Immutable. Name of the Compute Engine `machine + type `__ + to use, e.g. ``n2-standard-2``. + termination_action (google.cloud.hypercomputecluster_v1.types.NewSpotInstancesConfig.TerminationAction): + Optional. Termination action for the + instance. If not specified, Compute Engine sets + the termination action to DELETE. 
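+
+    Example (an illustrative sketch; the zone and machine type are
+    placeholders)::
+
+        from google.cloud import hypercomputecluster_v1
+
+        spot = hypercomputecluster_v1.NewSpotInstancesConfig(
+            zone="us-central1-a",
+            machine_type="n2-standard-2",
+            # Delete, rather than stop, Spot VMs on preemption.
+            termination_action=(
+                hypercomputecluster_v1.NewSpotInstancesConfig.TerminationAction.DELETE
+            ),
+        )
+        compute = hypercomputecluster_v1.ComputeResource(
+            config=hypercomputecluster_v1.ComputeResourceConfig(
+                new_spot_instances=spot,
+            ),
+        )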
+ """ + + class TerminationAction(proto.Enum): + r"""Specifies the termination action of the instance + + Values: + TERMINATION_ACTION_UNSPECIFIED (0): + Not set. + STOP (1): + Compute Engine stops the Spot VM on + preemption. + DELETE (2): + Compute Engine deletes the Spot VM on + preemption. + """ + + TERMINATION_ACTION_UNSPECIFIED = 0 + STOP = 1 + DELETE = 2 + + zone: str = proto.Field( + proto.STRING, + number=1, + ) + machine_type: str = proto.Field( + proto.STRING, + number=2, + ) + termination_action: TerminationAction = proto.Field( + proto.ENUM, + number=5, + enum=TerminationAction, + ) + + +class NewReservedInstancesConfig(proto.Message): + r"""When set in a + [ComputeResourceConfig][google.cloud.hypercomputecluster.v1.ComputeResourceConfig], + indicates that VM instances should be created from a + `reservation `__. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + reservation (str): + Optional. Immutable. Name of the reservation from which VM + instances should be created, in the format + ``projects/{project}/zones/{zone}/reservations/{reservation}``. + + This field is a member of `oneof`_ ``source``. + """ + + reservation: str = proto.Field( + proto.STRING, + number=1, + oneof="source", + ) + + +class NewFlexStartInstancesConfig(proto.Message): + r"""When set in a + [ComputeResourceConfig][google.cloud.hypercomputecluster.v1.ComputeResourceConfig], + indicates that VM instances should be created using `Flex + Start `__. + + Attributes: + zone (str): + Required. Immutable. Name of the zone in which VM instances + should run, e.g., ``us-central1-a``. Must be in the same + region as the cluster, and must match the zone of any other + resources specified in the cluster. + machine_type (str): + Required. Immutable. Name of the Compute Engine `machine + type `__ + to use, e.g. ``n2-standard-2``. + max_duration (google.protobuf.duration_pb2.Duration): + Required. Immutable. Specifies the time limit + for created instances. Instances will be + terminated at the end of this duration. + """ + + zone: str = proto.Field( + proto.STRING, + number=1, + ) + machine_type: str = proto.Field( + proto.STRING, + number=2, + ) + max_duration: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=3, + message=duration_pb2.Duration, + ) + + +class BootDisk(proto.Message): + r"""A `Persistent disk `__ + used as the boot disk for a Compute Engine VM instance. + + Attributes: + type_ (str): + Required. Immutable. `Persistent disk + type `__, + in the format + ``projects/{project}/zones/{zone}/diskTypes/{disk_type}``. + size_gb (int): + Required. Immutable. Size of the disk in + gigabytes. Must be at least 10GB. + """ + + type_: str = proto.Field( + proto.STRING, + number=1, + ) + size_gb: int = proto.Field( + proto.INT64, + number=2, + ) + + +class Orchestrator(proto.Message): + r"""The component responsible for scheduling and running + workloads on the cluster as well as providing the user interface + for interacting with the cluster at runtime. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + slurm (google.cloud.hypercomputecluster_v1.types.SlurmOrchestrator): + Optional. If set, indicates that the cluster + should use Slurm as the orchestrator. + + This field is a member of `oneof`_ ``option``. 
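+
+    Example (an illustrative sketch; the IDs and zone are placeholders,
+    and ``compute_id`` must match a key in the cluster's
+    ``compute_resources`` map)::
+
+        from google.cloud import hypercomputecluster_v1
+
+        orchestrator = hypercomputecluster_v1.Orchestrator(
+            slurm=hypercomputecluster_v1.SlurmOrchestrator(
+                login_nodes=hypercomputecluster_v1.SlurmLoginNodes(
+                    count=1,
+                    zone="us-central1-a",
+                    machine_type="n2-standard-2",
+                ),
+                node_sets=[
+                    hypercomputecluster_v1.SlurmNodeSet(
+                        id="batch",
+                        compute_id="spot-nodes",
+                        static_node_count=2,
+                    ),
+                ],
+                partitions=[
+                    hypercomputecluster_v1.SlurmPartition(
+                        id="main",
+                        node_set_ids=["batch"],
+                    ),
+                ],
+            ),
+        )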
+ """ + + slurm: "SlurmOrchestrator" = proto.Field( + proto.MESSAGE, + number=1, + oneof="option", + message="SlurmOrchestrator", + ) + + +class SlurmOrchestrator(proto.Message): + r"""When set in + [Orchestrator][google.cloud.hypercomputecluster.v1.Orchestrator], + indicates that the cluster should use + `Slurm `__ as the orchestrator. + + Attributes: + login_nodes (google.cloud.hypercomputecluster_v1.types.SlurmLoginNodes): + Required. Configuration for login nodes, + which allow users to access the cluster over + SSH. + node_sets (MutableSequence[google.cloud.hypercomputecluster_v1.types.SlurmNodeSet]): + Optional. Compute resource configuration for + the Slurm nodesets in your cluster. If not + specified, the cluster won't create any nodes. + partitions (MutableSequence[google.cloud.hypercomputecluster_v1.types.SlurmPartition]): + Optional. Configuration for the Slurm + partitions in your cluster. Each partition can + contain one or more nodesets, and you can submit + separate jobs on each partition. If you don't + specify at least one partition in your cluster, + you can't submit jobs to the cluster. + default_partition (str): + Optional. Default partition to use for + submitted jobs that do not explicitly specify a + partition. Required if and only if there is more + than one partition, in which case it must match + the id of one of the partitions. + prolog_bash_scripts (MutableSequence[str]): + Optional. Slurm `prolog + scripts `__, + which will be executed by compute nodes before a node begins + running a new job. Values must not be empty. + epilog_bash_scripts (MutableSequence[str]): + Optional. Slurm `epilog + scripts `__, + which will be executed by compute nodes whenever a node + finishes running a job. Values must not be empty. + """ + + login_nodes: "SlurmLoginNodes" = proto.Field( + proto.MESSAGE, + number=6, + message="SlurmLoginNodes", + ) + node_sets: MutableSequence["SlurmNodeSet"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="SlurmNodeSet", + ) + partitions: MutableSequence["SlurmPartition"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="SlurmPartition", + ) + default_partition: str = proto.Field( + proto.STRING, + number=3, + ) + prolog_bash_scripts: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=4, + ) + epilog_bash_scripts: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=5, + ) + + +class SlurmNodeSet(proto.Message): + r"""Configuration for Slurm nodesets in the cluster. Nodesets are + groups of compute nodes used by Slurm that are responsible for + running workloads submitted to the cluster. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + compute_instance (google.cloud.hypercomputecluster_v1.types.ComputeInstanceSlurmNodeSet): + Optional. If set, indicates that the nodeset + should be backed by Compute Engine instances. + + This field is a member of `oneof`_ ``type``. + id (str): + Required. Identifier for the nodeset, which allows it to be + referenced by partitions. Must conform to + `RFC-1034 `__ + (lower-case, alphanumeric, and at most 63 characters). + compute_id (str): + Optional. ID of the compute resource on which this nodeset + will run. Must match a key in the cluster's + [compute_resources][google.cloud.hypercomputecluster.v1.Cluster.compute_resources]. + storage_configs (MutableSequence[google.cloud.hypercomputecluster_v1.types.StorageConfig]): + Optional. 
How [storage + resources][google.cloud.hypercomputecluster.v1.StorageResource] + should be mounted on each compute node. + static_node_count (int): + Optional. Number of nodes to be statically + created for this nodeset. The cluster will + attempt to ensure that at least this many nodes + exist at all times. + max_dynamic_node_count (int): + Optional. Controls how many additional nodes + a cluster can bring online to handle workloads. + Set this value to enable dynamic node creation + and limit the number of additional nodes the + cluster can bring online. Leave empty if you do + not want the cluster to create nodes + dynamically, and instead rely only on static + nodes. + """ + + compute_instance: "ComputeInstanceSlurmNodeSet" = proto.Field( + proto.MESSAGE, + number=17, + oneof="type", + message="ComputeInstanceSlurmNodeSet", + ) + id: str = proto.Field( + proto.STRING, + number=1, + ) + compute_id: str = proto.Field( + proto.STRING, + number=16, + ) + storage_configs: MutableSequence["StorageConfig"] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message="StorageConfig", + ) + static_node_count: int = proto.Field( + proto.INT64, + number=4, + ) + max_dynamic_node_count: int = proto.Field( + proto.INT64, + number=5, + ) + + +class ComputeInstanceSlurmNodeSet(proto.Message): + r"""When set in a + [SlurmNodeSet][google.cloud.hypercomputecluster.v1.SlurmNodeSet], + indicates that the nodeset should be backed by Compute Engine VM + instances. + + Attributes: + startup_script (str): + Optional. `Startup + script `__ + to be run on each VM instance in the nodeset. Max 256KB. + labels (MutableMapping[str, str]): + Optional. + `Labels `__ + that should be applied to each VM instance in the nodeset. + boot_disk (google.cloud.hypercomputecluster_v1.types.BootDisk): + Optional. Boot disk for the compute instance + """ + + startup_script: str = proto.Field( + proto.STRING, + number=1, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + boot_disk: "BootDisk" = proto.Field( + proto.MESSAGE, + number=3, + message="BootDisk", + ) + + +class SlurmPartition(proto.Message): + r"""Configuration for Slurm partitions in the cluster. Partitions + are groups of nodesets, and are how clients specify where their + workloads should be run. + + Attributes: + id (str): + Required. ID of the partition, which is how users will + identify it. Must conform to + `RFC-1034 `__ + (lower-case, alphanumeric, and at most 63 characters). + node_set_ids (MutableSequence[str]): + Required. IDs of the nodesets that make up this partition. + Values must match + [SlurmNodeSet.id][google.cloud.hypercomputecluster.v1.SlurmNodeSet.id]. + """ + + id: str = proto.Field( + proto.STRING, + number=1, + ) + node_set_ids: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class SlurmLoginNodes(proto.Message): + r"""Configuration for Slurm `login + nodes `__ in + the cluster. Login nodes are Compute Engine VM instances that allow + users to access the cluster over SSH. + + Attributes: + count (int): + Required. Number of login node instances to + create. + zone (str): + Required. Name of the zone in which login nodes should run, + e.g., ``us-central1-a``. Must be in the same region as the + cluster, and must match the zone of any other resources + specified in the cluster. + machine_type (str): + Required. Name of the Compute Engine `machine + type `__ + to use for login nodes, e.g. ``n2-standard-2``. + startup_script (str): + Optional. 
`Startup + script `__ + to be run on each login node instance. Max 256KB. The script + must complete within the system-defined default timeout of 5 + minutes. For tasks that require more time, consider running + them in the background using methods such as ``&`` or + ``nohup``. + enable_os_login (bool): + Optional. Whether `OS + Login `__ + should be enabled on login node instances. + enable_public_ips (bool): + Optional. Whether login node instances should be assigned + `external IP + addresses `__. + labels (MutableMapping[str, str]): + Optional. + `Labels `__ + that should be applied to each login node instance. + storage_configs (MutableSequence[google.cloud.hypercomputecluster_v1.types.StorageConfig]): + Optional. How [storage + resources][google.cloud.hypercomputecluster.v1.StorageResource] + should be mounted on each login node. + instances (MutableSequence[google.cloud.hypercomputecluster_v1.types.ComputeInstance]): + Output only. Information about the login node + instances that were created in Compute Engine. + boot_disk (google.cloud.hypercomputecluster_v1.types.BootDisk): + Optional. Boot disk for the login node. + """ + + count: int = proto.Field( + proto.INT64, + number=3, + ) + zone: str = proto.Field( + proto.STRING, + number=2, + ) + machine_type: str = proto.Field( + proto.STRING, + number=1, + ) + startup_script: str = proto.Field( + proto.STRING, + number=5, + ) + enable_os_login: bool = proto.Field( + proto.BOOL, + number=6, + ) + enable_public_ips: bool = proto.Field( + proto.BOOL, + number=7, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + storage_configs: MutableSequence["StorageConfig"] = proto.RepeatedField( + proto.MESSAGE, + number=12, + message="StorageConfig", + ) + instances: MutableSequence["ComputeInstance"] = proto.RepeatedField( + proto.MESSAGE, + number=10, + message="ComputeInstance", + ) + boot_disk: "BootDisk" = proto.Field( + proto.MESSAGE, + number=13, + message="BootDisk", + ) + + +class StorageConfig(proto.Message): + r"""Description of how a [storage + resource][google.cloud.hypercomputecluster.v1.StorageResource] + should be mounted on a VM instance. + + Attributes: + id (str): + Required. ID of the storage resource to mount, which must + match a key in the cluster's + [storage_resources][google.cloud.hypercomputecluster.v1.Cluster.storage_resources]. + local_mount (str): + Required. A directory inside the VM instance's file system + where the storage resource should be mounted (e.g., + ``/mnt/share``). + """ + + id: str = proto.Field( + proto.STRING, + number=1, + ) + local_mount: str = proto.Field( + proto.STRING, + number=2, + ) + + +class ComputeInstance(proto.Message): + r"""Details about a Compute Engine + `instance `__. + + Attributes: + instance (str): + Output only. Name of the VM instance, in the format + ``projects/{project}/zones/{zone}/instances/{instance}``. 
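+
+    Example (an illustrative sketch; the cluster name is a placeholder
+    and the cluster is assumed to use the Slurm orchestrator)::
+
+        from google.cloud import hypercomputecluster_v1
+
+        client = hypercomputecluster_v1.HypercomputeClusterClient()
+        cluster = client.get_cluster(
+            request=hypercomputecluster_v1.GetClusterRequest(
+                name="projects/my-project/locations/us-central1/clusters/demo",
+            )
+        )
+        # Login node VMs surface as output-only ComputeInstance records.
+        for vm in cluster.orchestrator.slurm.login_nodes.instances:
+            print(vm.instance)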
+ """ + + instance: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/types/operation_metadata.py b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/types/operation_metadata.py new file mode 100644 index 000000000000..a2ca4e67a83d --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/google/cloud/hypercomputecluster_v1/types/operation_metadata.py @@ -0,0 +1,719 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.cloud.hypercomputecluster.v1", + manifest={ + "OperationMetadata", + "OperationProgress", + "OperationStep", + "CreateNetwork", + "CreatePrivateServiceAccess", + "CreateFilestoreInstance", + "CreateStorageBucket", + "CreateLustreInstance", + "CreateOrchestrator", + "CreateNodeset", + "CreatePartition", + "CreateLoginNode", + "CheckClusterHealth", + "UpdateOrchestrator", + "UpdateNodeset", + "UpdatePartition", + "UpdateLoginNode", + "DeleteOrchestrator", + "DeleteNodeset", + "DeletePartition", + "DeleteLoginNode", + "DeleteFilestoreInstance", + "DeleteStorageBucket", + "DeleteLustreInstance", + "DeletePrivateServiceAccess", + "DeleteNetwork", + }, +) + + +class OperationMetadata(proto.Message): + r"""Represents the metadata of the long-running operation. + + Attributes: + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time the operation was + created. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time the operation finished + running. + target (str): + Output only. Server-defined resource path for + the target of the operation. + verb (str): + Output only. Name of the verb executed by the + operation. + requested_cancellation (bool): + Output only. Identifies whether the user has requested + cancellation of the operation. Operations that have been + cancelled successfully have + [google.longrunning.Operation.error][google.longrunning.Operation.error] + value with a + [google.rpc.Status.code][google.rpc.Status.code] of ``1``, + corresponding to ``Code.CANCELLED``. + api_version (str): + Output only. API version used to start the + operation. + progress (google.cloud.hypercomputecluster_v1.types.OperationProgress): + Output only. Progress of the operation. 
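+
+    Example (an illustrative sketch; it assumes ``op`` is the
+    :class:`google.api_core.operation.Operation` returned by one of the
+    generated client's mutation methods, whose metadata deserializes to
+    this message)::
+
+        from google.api_core import operation
+
+        from google.cloud import hypercomputecluster_v1
+
+        def log_progress(op: operation.Operation) -> None:
+            # Per-step progress can be inspected while the LRO runs.
+            metadata: hypercomputecluster_v1.OperationMetadata = op.metadata
+            print(metadata.verb, metadata.target)
+            for step in metadata.progress.steps:
+                print(step.state)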
+ """ + + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + target: str = proto.Field( + proto.STRING, + number=3, + ) + verb: str = proto.Field( + proto.STRING, + number=4, + ) + requested_cancellation: bool = proto.Field( + proto.BOOL, + number=5, + ) + api_version: str = proto.Field( + proto.STRING, + number=6, + ) + progress: "OperationProgress" = proto.Field( + proto.MESSAGE, + number=7, + message="OperationProgress", + ) + + +class OperationProgress(proto.Message): + r"""Message describing the progress of a cluster mutation + long-running operation. + + Attributes: + steps (MutableSequence[google.cloud.hypercomputecluster_v1.types.OperationStep]): + Output only. Steps and status of the + operation. + """ + + steps: MutableSequence["OperationStep"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="OperationStep", + ) + + +class OperationStep(proto.Message): + r"""Message describing the status of a single step in a cluster + mutation long-running operation. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + create_network (google.cloud.hypercomputecluster_v1.types.CreateNetwork): + Output only. If set, indicates that new + network creation is part of the operation. + + This field is a member of `oneof`_ ``type``. + create_private_service_access (google.cloud.hypercomputecluster_v1.types.CreatePrivateServiceAccess): + Output only. If set, indicates that new + private service access creation is part of the + operation. + + This field is a member of `oneof`_ ``type``. + create_filestore_instance (google.cloud.hypercomputecluster_v1.types.CreateFilestoreInstance): + Output only. If set, indicates that new + Filestore instance creation is part of the + operation. + + This field is a member of `oneof`_ ``type``. + create_storage_bucket (google.cloud.hypercomputecluster_v1.types.CreateStorageBucket): + Output only. If set, indicates that new Cloud + Storage bucket creation is part of the + operation. + + This field is a member of `oneof`_ ``type``. + create_lustre_instance (google.cloud.hypercomputecluster_v1.types.CreateLustreInstance): + Output only. If set, indicates that new + Lustre instance creation is part of the + operation. + + This field is a member of `oneof`_ ``type``. + create_orchestrator (google.cloud.hypercomputecluster_v1.types.CreateOrchestrator): + Output only. If set, indicates that + orchestrator creation is part of the operation. + + This field is a member of `oneof`_ ``type``. + create_nodeset (google.cloud.hypercomputecluster_v1.types.CreateNodeset): + Output only. If set, indicates that new + nodeset creation is part of the operation. + + This field is a member of `oneof`_ ``type``. + create_partition (google.cloud.hypercomputecluster_v1.types.CreatePartition): + Output only. If set, indicates that new + partition creation is part of the operation. + + This field is a member of `oneof`_ ``type``. + create_login_node (google.cloud.hypercomputecluster_v1.types.CreateLoginNode): + Output only. If set, indicates that new login + node creation is part of the operation. 
+ + This field is a member of `oneof`_ ``type``. + check_cluster_health (google.cloud.hypercomputecluster_v1.types.CheckClusterHealth): + Output only. If set, indicates that cluster + health check is part of the operation. + + This field is a member of `oneof`_ ``type``. + update_orchestrator (google.cloud.hypercomputecluster_v1.types.UpdateOrchestrator): + Output only. If set, indicates that an + orchestrator update is part of the operation. + + This field is a member of `oneof`_ ``type``. + update_nodeset (google.cloud.hypercomputecluster_v1.types.UpdateNodeset): + Output only. If set, indicates that nodeset + update is part of the operation. + + This field is a member of `oneof`_ ``type``. + update_partition (google.cloud.hypercomputecluster_v1.types.UpdatePartition): + Output only. If set, indicates that partition + update is part of the operation. + + This field is a member of `oneof`_ ``type``. + update_login_node (google.cloud.hypercomputecluster_v1.types.UpdateLoginNode): + Output only. If set, indicates that login + node update is part of the operation. + + This field is a member of `oneof`_ ``type``. + delete_orchestrator (google.cloud.hypercomputecluster_v1.types.DeleteOrchestrator): + Output only. If set, indicates that + orchestrator deletion is part of the operation. + + This field is a member of `oneof`_ ``type``. + delete_nodeset (google.cloud.hypercomputecluster_v1.types.DeleteNodeset): + Output only. If set, indicates that nodeset + deletion is part of the operation. + + This field is a member of `oneof`_ ``type``. + delete_partition (google.cloud.hypercomputecluster_v1.types.DeletePartition): + Output only. If set, indicates that partition + deletion is part of the operation. + + This field is a member of `oneof`_ ``type``. + delete_login_node (google.cloud.hypercomputecluster_v1.types.DeleteLoginNode): + Output only. If set, indicates that login + node deletion is part of the operation. + + This field is a member of `oneof`_ ``type``. + delete_filestore_instance (google.cloud.hypercomputecluster_v1.types.DeleteFilestoreInstance): + Output only. If set, indicates that Filestore + instance deletion is part of the operation. + + This field is a member of `oneof`_ ``type``. + delete_storage_bucket (google.cloud.hypercomputecluster_v1.types.DeleteStorageBucket): + Output only. If set, indicates that Cloud + Storage bucket deletion is part of the + operation. + + This field is a member of `oneof`_ ``type``. + delete_lustre_instance (google.cloud.hypercomputecluster_v1.types.DeleteLustreInstance): + Output only. If set, indicates that Lustre + instance deletion is part of the operation. + + This field is a member of `oneof`_ ``type``. + delete_private_service_access (google.cloud.hypercomputecluster_v1.types.DeletePrivateServiceAccess): + Output only. If set, indicates that private + service access deletion is part of the + operation. + + This field is a member of `oneof`_ ``type``. + delete_network (google.cloud.hypercomputecluster_v1.types.DeleteNetwork): + Output only. If set, indicates that network + deletion is part of the operation. + + This field is a member of `oneof`_ ``type``. + state (google.cloud.hypercomputecluster_v1.types.OperationStep.State): + Output only. State of the operation step. + """ + + class State(proto.Enum): + r"""State of the operation step. + + Values: + STATE_UNSPECIFIED (0): + Unspecified state. + WAITING (1): + Initial state before step execution starts. + IN_PROGRESS (2): + Step execution is running in progress. 
+ DONE (3): + Step execution is completed. + """ + + STATE_UNSPECIFIED = 0 + WAITING = 1 + IN_PROGRESS = 2 + DONE = 3 + + create_network: "CreateNetwork" = proto.Field( + proto.MESSAGE, + number=2, + oneof="type", + message="CreateNetwork", + ) + create_private_service_access: "CreatePrivateServiceAccess" = proto.Field( + proto.MESSAGE, + number=3, + oneof="type", + message="CreatePrivateServiceAccess", + ) + create_filestore_instance: "CreateFilestoreInstance" = proto.Field( + proto.MESSAGE, + number=4, + oneof="type", + message="CreateFilestoreInstance", + ) + create_storage_bucket: "CreateStorageBucket" = proto.Field( + proto.MESSAGE, + number=5, + oneof="type", + message="CreateStorageBucket", + ) + create_lustre_instance: "CreateLustreInstance" = proto.Field( + proto.MESSAGE, + number=6, + oneof="type", + message="CreateLustreInstance", + ) + create_orchestrator: "CreateOrchestrator" = proto.Field( + proto.MESSAGE, + number=8, + oneof="type", + message="CreateOrchestrator", + ) + create_nodeset: "CreateNodeset" = proto.Field( + proto.MESSAGE, + number=9, + oneof="type", + message="CreateNodeset", + ) + create_partition: "CreatePartition" = proto.Field( + proto.MESSAGE, + number=10, + oneof="type", + message="CreatePartition", + ) + create_login_node: "CreateLoginNode" = proto.Field( + proto.MESSAGE, + number=11, + oneof="type", + message="CreateLoginNode", + ) + check_cluster_health: "CheckClusterHealth" = proto.Field( + proto.MESSAGE, + number=12, + oneof="type", + message="CheckClusterHealth", + ) + update_orchestrator: "UpdateOrchestrator" = proto.Field( + proto.MESSAGE, + number=13, + oneof="type", + message="UpdateOrchestrator", + ) + update_nodeset: "UpdateNodeset" = proto.Field( + proto.MESSAGE, + number=14, + oneof="type", + message="UpdateNodeset", + ) + update_partition: "UpdatePartition" = proto.Field( + proto.MESSAGE, + number=15, + oneof="type", + message="UpdatePartition", + ) + update_login_node: "UpdateLoginNode" = proto.Field( + proto.MESSAGE, + number=16, + oneof="type", + message="UpdateLoginNode", + ) + delete_orchestrator: "DeleteOrchestrator" = proto.Field( + proto.MESSAGE, + number=18, + oneof="type", + message="DeleteOrchestrator", + ) + delete_nodeset: "DeleteNodeset" = proto.Field( + proto.MESSAGE, + number=19, + oneof="type", + message="DeleteNodeset", + ) + delete_partition: "DeletePartition" = proto.Field( + proto.MESSAGE, + number=20, + oneof="type", + message="DeletePartition", + ) + delete_login_node: "DeleteLoginNode" = proto.Field( + proto.MESSAGE, + number=21, + oneof="type", + message="DeleteLoginNode", + ) + delete_filestore_instance: "DeleteFilestoreInstance" = proto.Field( + proto.MESSAGE, + number=22, + oneof="type", + message="DeleteFilestoreInstance", + ) + delete_storage_bucket: "DeleteStorageBucket" = proto.Field( + proto.MESSAGE, + number=23, + oneof="type", + message="DeleteStorageBucket", + ) + delete_lustre_instance: "DeleteLustreInstance" = proto.Field( + proto.MESSAGE, + number=24, + oneof="type", + message="DeleteLustreInstance", + ) + delete_private_service_access: "DeletePrivateServiceAccess" = proto.Field( + proto.MESSAGE, + number=25, + oneof="type", + message="DeletePrivateServiceAccess", + ) + delete_network: "DeleteNetwork" = proto.Field( + proto.MESSAGE, + number=26, + oneof="type", + message="DeleteNetwork", + ) + state: State = proto.Field( + proto.ENUM, + number=1, + enum=State, + ) + + +class CreateNetwork(proto.Message): + r"""When set in OperationStep, indicates that a new network + should be created. 
+
+    Attributes:
+        network (str):
+            Output only. Name of the network to create, in the format
+            ``projects/{project}/global/networks/{network}``.
+    """
+
+    network: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CreatePrivateServiceAccess(proto.Message):
+    r"""When set in OperationStep, indicates that a new private
+    service access should be created.
+
+    """
+
+
+class CreateFilestoreInstance(proto.Message):
+    r"""When set in OperationStep, indicates that a new Filestore
+    instance should be created.
+
+    Attributes:
+        filestore (str):
+            Output only. Name of the Filestore instance, in the format
+            ``projects/{project}/locations/{location}/instances/{instance}``
+    """
+
+    filestore: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CreateStorageBucket(proto.Message):
+    r"""When set in OperationStep, indicates that a new storage
+    bucket should be created.
+
+    Attributes:
+        bucket (str):
+            Output only. Name of the bucket.
+    """
+
+    bucket: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CreateLustreInstance(proto.Message):
+    r"""When set in OperationStep, indicates that a new Lustre
+    instance should be created.
+
+    Attributes:
+        lustre (str):
+            Output only. Name of the Managed Lustre instance, in the
+            format
+            ``projects/{project}/locations/{location}/instances/{instance}``
+    """
+
+    lustre: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CreateOrchestrator(proto.Message):
+    r"""When set in OperationStep, indicates that an orchestrator
+    should be created.
+
+    """
+
+
+class CreateNodeset(proto.Message):
+    r"""When set in OperationStep, indicates that a nodeset should be
+    created.
+
+    Attributes:
+        nodesets (MutableSequence[str]):
+            Output only. Names of the nodesets to create.
+    """
+
+    nodesets: MutableSequence[str] = proto.RepeatedField(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CreatePartition(proto.Message):
+    r"""When set in OperationStep, indicates that a partition should
+    be created.
+
+    Attributes:
+        partitions (MutableSequence[str]):
+            Output only. Names of the partitions to create.
+    """
+
+    partitions: MutableSequence[str] = proto.RepeatedField(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CreateLoginNode(proto.Message):
+    r"""When set in OperationStep, indicates that a new login node
+    should be created.
+
+    """
+
+
+class CheckClusterHealth(proto.Message):
+    r"""When set in OperationStep, indicates that a cluster health
+    check should be performed.
+
+    """
+
+
+class UpdateOrchestrator(proto.Message):
+    r"""When set in OperationStep, indicates that an orchestrator
+    should be updated.
+
+    """
+
+
+class UpdateNodeset(proto.Message):
+    r"""When set in OperationStep, indicates that a nodeset should be
+    updated.
+
+    Attributes:
+        nodesets (MutableSequence[str]):
+            Output only. Names of the nodesets to update.
+    """
+
+    nodesets: MutableSequence[str] = proto.RepeatedField(
+        proto.STRING,
+        number=1,
+    )
+
+
+class UpdatePartition(proto.Message):
+    r"""When set in OperationStep, indicates that a partition should
+    be updated.
+
+    Attributes:
+        partitions (MutableSequence[str]):
+            Output only. Names of the partitions to update.
+    """
+
+    partitions: MutableSequence[str] = proto.RepeatedField(
+        proto.STRING,
+        number=1,
+    )
+
+
+class UpdateLoginNode(proto.Message):
+    r"""When set in OperationStep, indicates that a login node should
+    be updated.
+
+    """
+
+
+class DeleteOrchestrator(proto.Message):
+    r"""When set in OperationStep, indicates that an orchestrator
+    should be deleted.
+
+    """
+
+
+class DeleteNodeset(proto.Message):
+    r"""When set in OperationStep, indicates that a nodeset should be
+    deleted.
+
+    Attributes:
+        nodesets (MutableSequence[str]):
+            Output only. Names of the nodesets to delete.
+    """
+
+    nodesets: MutableSequence[str] = proto.RepeatedField(
+        proto.STRING,
+        number=1,
+    )
+
+
+class DeletePartition(proto.Message):
+    r"""When set in OperationStep, indicates that a partition should
+    be deleted.
+
+    Attributes:
+        partitions (MutableSequence[str]):
+            Output only. Names of the partitions to delete.
+    """
+
+    partitions: MutableSequence[str] = proto.RepeatedField(
+        proto.STRING,
+        number=1,
+    )
+
+
+class DeleteLoginNode(proto.Message):
+    r"""When set in OperationStep, indicates that a login node should
+    be deleted.
+
+    """
+
+
+class DeleteFilestoreInstance(proto.Message):
+    r"""When set in OperationStep, indicates that a Filestore
+    instance should be deleted.
+
+    Attributes:
+        filestore (str):
+            Output only. Name of the Filestore instance, in the format
+            ``projects/{project}/locations/{location}/instances/{instance}``
+    """
+
+    filestore: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class DeleteStorageBucket(proto.Message):
+    r"""When set in OperationStep, indicates that a Cloud Storage
+    bucket should be deleted.
+
+    Attributes:
+        bucket (str):
+            Output only. Name of the bucket.
+    """
+
+    bucket: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class DeleteLustreInstance(proto.Message):
+    r"""When set in OperationStep, indicates that a Lustre instance
+    should be deleted.
+
+    Attributes:
+        lustre (str):
+            Output only. Name of the Managed Lustre instance, in the
+            format
+            ``projects/{project}/locations/{location}/instances/{instance}``
+    """
+
+    lustre: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class DeletePrivateServiceAccess(proto.Message):
+    r"""When set in OperationStep, indicates a private service
+    access deletion step.
+
+    """
+
+
+class DeleteNetwork(proto.Message):
+    r"""When set in OperationStep, indicates a network deletion step
+    with the resource name.
+
+    Attributes:
+        network (str):
+            Output only. Name of the network to delete, in the format
+            ``projects/{project}/global/networks/{network}``.
+    """
+
+    network: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_create_cluster_async.py b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_create_cluster_async.py
new file mode 100644
index 000000000000..b70c140fea3d
--- /dev/null
+++ b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_create_cluster_async.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
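+#
+# NOTE: in google-api-core, ``AsyncOperation.result()`` is itself a
+# coroutine, so the ``response = (await operation).result()`` pattern used
+# below may need a second ``await`` to fully resolve the result, depending
+# on your google-api-core version, e.g. (sketch):
+#
+#   response = await (await operation).result()
+#
+# The sample function is a coroutine; drive it with an event loop, e.g.:
+#
+#   import asyncio
+#   asyncio.run(sample_create_cluster())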
+# +# Snippet for CreateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-hypercomputecluster + + +# [START hypercomputecluster_v1_generated_HypercomputeCluster_CreateCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import hypercomputecluster_v1 + + +async def sample_create_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterAsyncClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END hypercomputecluster_v1_generated_HypercomputeCluster_CreateCluster_async] diff --git a/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_create_cluster_sync.py b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_create_cluster_sync.py new file mode 100644 index 000000000000..a6f4eb407454 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_create_cluster_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-hypercomputecluster + + +# [START hypercomputecluster_v1_generated_HypercomputeCluster_CreateCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
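+# - The values "parent_value" and "cluster_id_value" are placeholders; for
+#   this service the parent is presumably the usual location-scoped form,
+#   e.g. "projects/{project}/locations/{location}" (assumption; check the
+#   Cluster Director docs for the exact format).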
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import hypercomputecluster_v1 + + +def sample_create_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END hypercomputecluster_v1_generated_HypercomputeCluster_CreateCluster_sync] diff --git a/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_delete_cluster_async.py b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_delete_cluster_async.py new file mode 100644 index 000000000000..a73cf4953c4c --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_delete_cluster_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-hypercomputecluster + + +# [START hypercomputecluster_v1_generated_HypercomputeCluster_DeleteCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
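+# - "name_value" is a placeholder; the cluster name is presumably the full
+#   resource path, e.g.
+#   "projects/{project}/locations/{location}/clusters/{cluster}"
+#   (assumption; verify against the service docs).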
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import hypercomputecluster_v1 + + +async def sample_delete_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterAsyncClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END hypercomputecluster_v1_generated_HypercomputeCluster_DeleteCluster_async] diff --git a/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_delete_cluster_sync.py b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_delete_cluster_sync.py new file mode 100644 index 000000000000..879686bda0f0 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_delete_cluster_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-hypercomputecluster + + +# [START hypercomputecluster_v1_generated_HypercomputeCluster_DeleteCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
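+# - operation.result() blocks until the LRO completes and raises on failure;
+#   before it resolves, the operation's metadata is typed as
+#   hypercomputecluster_v1.OperationMetadata, so step progress can be
+#   inspected, e.g. (sketch):
+#       for step in operation.metadata.progress.steps:
+#           print(step.state)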
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import hypercomputecluster_v1 + + +def sample_delete_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END hypercomputecluster_v1_generated_HypercomputeCluster_DeleteCluster_sync] diff --git a/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_get_cluster_async.py b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_get_cluster_async.py new file mode 100644 index 000000000000..4e26aab55257 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_get_cluster_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-hypercomputecluster + + +# [START hypercomputecluster_v1_generated_HypercomputeCluster_GetCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
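+# - GetCluster is a unary call: it returns the Cluster directly, so unlike
+#   the create/update/delete samples there is no long-running operation to
+#   poll.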
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import hypercomputecluster_v1 + + +async def sample_get_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterAsyncClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = await client.get_cluster(request=request) + + # Handle the response + print(response) + + +# [END hypercomputecluster_v1_generated_HypercomputeCluster_GetCluster_async] diff --git a/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_get_cluster_sync.py b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_get_cluster_sync.py new file mode 100644 index 000000000000..7e8431256cd9 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_get_cluster_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-hypercomputecluster + + +# [START hypercomputecluster_v1_generated_HypercomputeCluster_GetCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
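+# - If the named cluster does not exist, the call raises
+#   google.api_core.exceptions.NotFound, which callers may want to catch.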
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import hypercomputecluster_v1 + + +def sample_get_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = client.get_cluster(request=request) + + # Handle the response + print(response) + + +# [END hypercomputecluster_v1_generated_HypercomputeCluster_GetCluster_sync] diff --git a/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_list_clusters_async.py b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_list_clusters_async.py new file mode 100644 index 000000000000..0e6031b04b3b --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_list_clusters_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListClusters +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-hypercomputecluster + + +# [START hypercomputecluster_v1_generated_HypercomputeCluster_ListClusters_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
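+# - The returned pager fetches results lazily: iterating with "async for"
+#   issues additional ListClusters RPCs as each page is exhausted.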
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import hypercomputecluster_v1 + + +async def sample_list_clusters(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterAsyncClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_clusters(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END hypercomputecluster_v1_generated_HypercomputeCluster_ListClusters_async] diff --git a/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_list_clusters_sync.py b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_list_clusters_sync.py new file mode 100644 index 000000000000..e858c12fff01 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_list_clusters_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListClusters +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-hypercomputecluster + + +# [START hypercomputecluster_v1_generated_HypercomputeCluster_ListClusters_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
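+# - ListClustersRequest presumably also accepts standard paging/filtering
+#   fields such as page_size and filter (assumption; check the generated
+#   types for the exact fields).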
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import hypercomputecluster_v1 + + +def sample_list_clusters(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_clusters(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END hypercomputecluster_v1_generated_HypercomputeCluster_ListClusters_sync] diff --git a/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_update_cluster_async.py b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_update_cluster_async.py new file mode 100644 index 000000000000..51a237cca1f3 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_update_cluster_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-hypercomputecluster + + +# [START hypercomputecluster_v1_generated_HypercomputeCluster_UpdateCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
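+# - An empty UpdateClusterRequest() is only a placeholder; per this
+#   package's snippet metadata a real update sets "cluster" and
+#   "update_mask", e.g. (sketch; the field paths are illustrative):
+#       from google.protobuf import field_mask_pb2
+#       request = hypercomputecluster_v1.UpdateClusterRequest(
+#           cluster=cluster,
+#           update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
+#       )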
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import hypercomputecluster_v1 + + +async def sample_update_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterAsyncClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.UpdateClusterRequest() + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END hypercomputecluster_v1_generated_HypercomputeCluster_UpdateCluster_async] diff --git a/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_update_cluster_sync.py b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_update_cluster_sync.py new file mode 100644 index 000000000000..e8ae070f25d5 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/samples/generated_samples/hypercomputecluster_v1_generated_hypercompute_cluster_update_cluster_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-hypercomputecluster + + +# [START hypercomputecluster_v1_generated_HypercomputeCluster_UpdateCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
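+# - Per this package's snippet metadata, update_cluster also accepts the
+#   flattened arguments "cluster" and "update_mask", e.g.
+#   client.update_cluster(cluster=cluster, update_mask=update_mask), as an
+#   alternative to passing a full request message.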
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import hypercomputecluster_v1 + + +def sample_update_cluster(): + # Create a client + client = hypercomputecluster_v1.HypercomputeClusterClient() + + # Initialize request argument(s) + request = hypercomputecluster_v1.UpdateClusterRequest() + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END hypercomputecluster_v1_generated_HypercomputeCluster_UpdateCluster_sync] diff --git a/packages/google-cloud-hypercomputecluster/samples/generated_samples/snippet_metadata_google.cloud.hypercomputecluster.v1.json b/packages/google-cloud-hypercomputecluster/samples/generated_samples/snippet_metadata_google.cloud.hypercomputecluster.v1.json new file mode 100644 index 000000000000..034cc5360ada --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/samples/generated_samples/snippet_metadata_google.cloud.hypercomputecluster.v1.json @@ -0,0 +1,844 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.cloud.hypercomputecluster.v1", + "version": "v1" + } + ], + "language": "PYTHON", + "name": "google-cloud-hypercomputecluster", + "version": "0.2.0" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient", + "shortName": "HypercomputeClusterAsyncClient" + }, + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient.create_cluster", + "method": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster.CreateCluster", + "service": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "shortName": "HypercomputeCluster" + }, + "shortName": "CreateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.hypercomputecluster_v1.types.CreateClusterRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "cluster", + "type": "google.cloud.hypercomputecluster_v1.types.Cluster" + }, + { + "name": "cluster_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_cluster" + }, + "description": "Sample for CreateCluster", + "file": "hypercomputecluster_v1_generated_hypercompute_cluster_create_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "hypercomputecluster_v1_generated_HypercomputeCluster_CreateCluster_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "hypercomputecluster_v1_generated_hypercompute_cluster_create_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterClient", + 
"shortName": "HypercomputeClusterClient" + }, + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.create_cluster", + "method": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster.CreateCluster", + "service": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "shortName": "HypercomputeCluster" + }, + "shortName": "CreateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.hypercomputecluster_v1.types.CreateClusterRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "cluster", + "type": "google.cloud.hypercomputecluster_v1.types.Cluster" + }, + { + "name": "cluster_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_cluster" + }, + "description": "Sample for CreateCluster", + "file": "hypercomputecluster_v1_generated_hypercompute_cluster_create_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "hypercomputecluster_v1_generated_HypercomputeCluster_CreateCluster_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "hypercomputecluster_v1_generated_hypercompute_cluster_create_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient", + "shortName": "HypercomputeClusterAsyncClient" + }, + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient.delete_cluster", + "method": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster.DeleteCluster", + "service": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "shortName": "HypercomputeCluster" + }, + "shortName": "DeleteCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.hypercomputecluster_v1.types.DeleteClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_cluster" + }, + "description": "Sample for DeleteCluster", + "file": "hypercomputecluster_v1_generated_hypercompute_cluster_delete_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "hypercomputecluster_v1_generated_HypercomputeCluster_DeleteCluster_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"hypercomputecluster_v1_generated_hypercompute_cluster_delete_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterClient", + "shortName": "HypercomputeClusterClient" + }, + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.delete_cluster", + "method": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster.DeleteCluster", + "service": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "shortName": "HypercomputeCluster" + }, + "shortName": "DeleteCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.hypercomputecluster_v1.types.DeleteClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_cluster" + }, + "description": "Sample for DeleteCluster", + "file": "hypercomputecluster_v1_generated_hypercompute_cluster_delete_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "hypercomputecluster_v1_generated_HypercomputeCluster_DeleteCluster_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "hypercomputecluster_v1_generated_hypercompute_cluster_delete_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient", + "shortName": "HypercomputeClusterAsyncClient" + }, + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient.get_cluster", + "method": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster.GetCluster", + "service": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "shortName": "HypercomputeCluster" + }, + "shortName": "GetCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.hypercomputecluster_v1.types.GetClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.hypercomputecluster_v1.types.Cluster", + "shortName": "get_cluster" + }, + "description": "Sample for GetCluster", + "file": "hypercomputecluster_v1_generated_hypercompute_cluster_get_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "hypercomputecluster_v1_generated_HypercomputeCluster_GetCluster_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + 
"type": "RESPONSE_HANDLING" + } + ], + "title": "hypercomputecluster_v1_generated_hypercompute_cluster_get_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterClient", + "shortName": "HypercomputeClusterClient" + }, + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.get_cluster", + "method": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster.GetCluster", + "service": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "shortName": "HypercomputeCluster" + }, + "shortName": "GetCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.hypercomputecluster_v1.types.GetClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.hypercomputecluster_v1.types.Cluster", + "shortName": "get_cluster" + }, + "description": "Sample for GetCluster", + "file": "hypercomputecluster_v1_generated_hypercompute_cluster_get_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "hypercomputecluster_v1_generated_HypercomputeCluster_GetCluster_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "hypercomputecluster_v1_generated_hypercompute_cluster_get_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient", + "shortName": "HypercomputeClusterAsyncClient" + }, + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient.list_clusters", + "method": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster.ListClusters", + "service": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "shortName": "HypercomputeCluster" + }, + "shortName": "ListClusters" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.hypercomputecluster_v1.types.ListClustersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.hypercomputecluster_v1.services.hypercompute_cluster.pagers.ListClustersAsyncPager", + "shortName": "list_clusters" + }, + "description": "Sample for ListClusters", + "file": "hypercomputecluster_v1_generated_hypercompute_cluster_list_clusters_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "hypercomputecluster_v1_generated_HypercomputeCluster_ListClusters_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + 
"end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "hypercomputecluster_v1_generated_hypercompute_cluster_list_clusters_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterClient", + "shortName": "HypercomputeClusterClient" + }, + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.list_clusters", + "method": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster.ListClusters", + "service": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "shortName": "HypercomputeCluster" + }, + "shortName": "ListClusters" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.hypercomputecluster_v1.types.ListClustersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.hypercomputecluster_v1.services.hypercompute_cluster.pagers.ListClustersPager", + "shortName": "list_clusters" + }, + "description": "Sample for ListClusters", + "file": "hypercomputecluster_v1_generated_hypercompute_cluster_list_clusters_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "hypercomputecluster_v1_generated_HypercomputeCluster_ListClusters_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "hypercomputecluster_v1_generated_hypercompute_cluster_list_clusters_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient", + "shortName": "HypercomputeClusterAsyncClient" + }, + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterAsyncClient.update_cluster", + "method": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster.UpdateCluster", + "service": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "shortName": "HypercomputeCluster" + }, + "shortName": "UpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.hypercomputecluster_v1.types.UpdateClusterRequest" + }, + { + "name": "cluster", + "type": "google.cloud.hypercomputecluster_v1.types.Cluster" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_cluster" + }, + "description": "Sample for UpdateCluster", + "file": "hypercomputecluster_v1_generated_hypercompute_cluster_update_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "hypercomputecluster_v1_generated_HypercomputeCluster_UpdateCluster_async", + "segments": [ + { + 
"end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "hypercomputecluster_v1_generated_hypercompute_cluster_update_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterClient", + "shortName": "HypercomputeClusterClient" + }, + "fullName": "google.cloud.hypercomputecluster_v1.HypercomputeClusterClient.update_cluster", + "method": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster.UpdateCluster", + "service": { + "fullName": "google.cloud.hypercomputecluster.v1.HypercomputeCluster", + "shortName": "HypercomputeCluster" + }, + "shortName": "UpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.hypercomputecluster_v1.types.UpdateClusterRequest" + }, + { + "name": "cluster", + "type": "google.cloud.hypercomputecluster_v1.types.Cluster" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_cluster" + }, + "description": "Sample for UpdateCluster", + "file": "hypercomputecluster_v1_generated_hypercompute_cluster_update_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "hypercomputecluster_v1_generated_HypercomputeCluster_UpdateCluster_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "hypercomputecluster_v1_generated_hypercompute_cluster_update_cluster_sync.py" + } + ] +} diff --git a/packages/google-cloud-hypercomputecluster/tests/unit/gapic/hypercomputecluster_v1/__init__.py b/packages/google-cloud-hypercomputecluster/tests/unit/gapic/hypercomputecluster_v1/__init__.py new file mode 100644 index 000000000000..cbf94b283c70 --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/tests/unit/gapic/hypercomputecluster_v1/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/packages/google-cloud-hypercomputecluster/tests/unit/gapic/hypercomputecluster_v1/test_hypercompute_cluster.py b/packages/google-cloud-hypercomputecluster/tests/unit/gapic/hypercomputecluster_v1/test_hypercompute_cluster.py new file mode 100644 index 000000000000..78012607e8ab --- /dev/null +++ b/packages/google-cloud-hypercomputecluster/tests/unit/gapic/hypercomputecluster_v1/test_hypercompute_cluster.py @@ -0,0 +1,7808 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import json +import math +from collections.abc import AsyncIterable, Iterable, Mapping, Sequence + +import grpc +import pytest +from google.api_core import api_core_version +from google.protobuf import json_format +from grpc.experimental import aio +from proto.marshal.rules import wrappers +from proto.marshal.rules.dates import DurationRule, TimestampRule +from requests import PreparedRequest, Request, Response +from requests.sessions import Session + +try: + from google.auth.aio import credentials as ga_credentials_async + + HAS_GOOGLE_AUTH_AIO = True +except ImportError: # pragma: NO COVER + HAS_GOOGLE_AUTH_AIO = False + +import google.api_core.operation_async as operation_async # type: ignore +import google.auth +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +from google.api_core import ( + client_options, + future, + gapic_v1, + grpc_helpers, + grpc_helpers_async, + operation, + operations_v1, + path_template, +) +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.location import locations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account + +from google.cloud.hypercomputecluster_v1.services.hypercompute_cluster import ( + HypercomputeClusterAsyncClient, + HypercomputeClusterClient, + pagers, + transports, +) +from google.cloud.hypercomputecluster_v1.types import ( + hypercompute_cluster, + operation_metadata, +) + +CRED_INFO_JSON = { + "credential_source": "/path/to/file", + "credential_type": "service account credentials", + "principal": "service-account@example.com", +} +CRED_INFO_STRING = json.dumps(CRED_INFO_JSON) + + +async def mock_async_gen(data, chunk_size=1): + for i in range(0, len(data)): # pragma: NO COVER + chunk = data[i : i + chunk_size] + yield chunk.encode("utf-8") + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# TODO: use 
async auth anon credentials by default once the minimum version of google-auth is upgraded. +# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107. +def async_anonymous_credentials(): + if HAS_GOOGLE_AUTH_AIO: + return ga_credentials_async.AnonymousCredentials() + return ga_credentials.AnonymousCredentials() + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert HypercomputeClusterClient._get_default_mtls_endpoint(None) is None + assert ( + HypercomputeClusterClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + HypercomputeClusterClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + HypercomputeClusterClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + HypercomputeClusterClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + HypercomputeClusterClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +def test__read_environment_variables(): + assert HypercomputeClusterClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert HypercomputeClusterClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert HypercomputeClusterClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with pytest.raises(ValueError) as excinfo: + HypercomputeClusterClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + else: + assert HypercomputeClusterClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert HypercomputeClusterClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert HypercomputeClusterClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert 
HypercomputeClusterClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + HypercomputeClusterClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert HypercomputeClusterClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test_use_client_cert_effective(): + # Test case 1: Test when `should_use_client_cert` returns True. + # We mock the `should_use_client_cert` function to simulate a scenario where + # the google-auth library supports automatic mTLS and determines that a + # client certificate should be used. + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch( + "google.auth.transport.mtls.should_use_client_cert", return_value=True + ): + assert HypercomputeClusterClient._use_client_cert_effective() is True + + # Test case 2: Test when `should_use_client_cert` returns False. + # We mock the `should_use_client_cert` function to simulate a scenario where + # the google-auth library supports automatic mTLS and determines that a + # client certificate should NOT be used. + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch( + "google.auth.transport.mtls.should_use_client_cert", return_value=False + ): + assert HypercomputeClusterClient._use_client_cert_effective() is False + + # Test case 3: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "true". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert HypercomputeClusterClient._use_client_cert_effective() is True + + # Test case 4: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "false". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"} + ): + assert HypercomputeClusterClient._use_client_cert_effective() is False + + # Test case 5: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "True". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "True"}): + assert HypercomputeClusterClient._use_client_cert_effective() is True + + # Test case 6: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "False". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "False"} + ): + assert HypercomputeClusterClient._use_client_cert_effective() is False + + # Test case 7: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "TRUE". 
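+    # (Cases 7 and 8 mirror cases 5 and 6: as the assertions show, the value is
+    # compared case-insensitively, so "TRUE"/"FALSE" behave like "true"/"false".)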
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "TRUE"}):
+            assert HypercomputeClusterClient._use_client_cert_effective() is True
+
+    # Test case 8: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "FALSE".
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(
+            os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "FALSE"}
+        ):
+            assert HypercomputeClusterClient._use_client_cert_effective() is False
+
+    # Test case 9: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not set.
+    # In this case, the method should return False, which is the default value.
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(os.environ, clear=True):
+            assert HypercomputeClusterClient._use_client_cert_effective() is False
+
+    # Test case 10: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+    # The method should raise a ValueError as the environment variable must be either
+    # "true" or "false".
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(
+            os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+        ):
+            with pytest.raises(ValueError):
+                HypercomputeClusterClient._use_client_cert_effective()
+
+    # Test case 11: Test when `should_use_client_cert` is available and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+    # The method should return False as the environment variable is set to an invalid value.
+    if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(
+            os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+        ):
+            assert HypercomputeClusterClient._use_client_cert_effective() is False
+
+    # Test case 12: Test when `should_use_client_cert` is available and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is unset (patched
+    # to an empty string below). Also, the `GOOGLE_API_CERTIFICATE_CONFIG`
+    # environment variable is unset (likewise patched to an empty string).
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": ""}): + with mock.patch.dict(os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": ""}): + assert HypercomputeClusterClient._use_client_cert_effective() is False + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert HypercomputeClusterClient._get_client_cert_source(None, False) is None + assert ( + HypercomputeClusterClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + HypercomputeClusterClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + HypercomputeClusterClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + HypercomputeClusterClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + HypercomputeClusterClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(HypercomputeClusterClient), +) +@mock.patch.object( + HypercomputeClusterAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(HypercomputeClusterAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = HypercomputeClusterClient._DEFAULT_UNIVERSE + default_endpoint = HypercomputeClusterClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = HypercomputeClusterClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + HypercomputeClusterClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + HypercomputeClusterClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == HypercomputeClusterClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + HypercomputeClusterClient._get_api_endpoint( + None, None, default_universe, "auto" + ) + == default_endpoint + ) + assert ( + HypercomputeClusterClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == HypercomputeClusterClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + HypercomputeClusterClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == HypercomputeClusterClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + HypercomputeClusterClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + ) + assert ( + HypercomputeClusterClient._get_api_endpoint( + None, None, default_universe, "never" + ) + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + HypercomputeClusterClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." 
+ ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + HypercomputeClusterClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + HypercomputeClusterClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + HypercomputeClusterClient._get_universe_domain(None, None) + == HypercomputeClusterClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + HypercomputeClusterClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "error_code,cred_info_json,show_cred_info", + [ + (401, CRED_INFO_JSON, True), + (403, CRED_INFO_JSON, True), + (404, CRED_INFO_JSON, True), + (500, CRED_INFO_JSON, False), + (401, None, False), + (403, None, False), + (404, None, False), + (500, None, False), + ], +) +def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): + cred = mock.Mock(["get_cred_info"]) + cred.get_cred_info = mock.Mock(return_value=cred_info_json) + client = HypercomputeClusterClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=["foo"]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + if show_cred_info: + assert error.details == ["foo", CRED_INFO_STRING] + else: + assert error.details == ["foo"] + + +@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) +def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): + cred = mock.Mock([]) + assert not hasattr(cred, "get_cred_info") + client = HypercomputeClusterClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=[]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + assert error.details == [] + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (HypercomputeClusterClient, "grpc"), + (HypercomputeClusterAsyncClient, "grpc_asyncio"), + (HypercomputeClusterClient, "rest"), + ], +) +def test_hypercompute_cluster_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "hypercomputecluster.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://hypercomputecluster.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.HypercomputeClusterGrpcTransport, "grpc"), + (transports.HypercomputeClusterGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.HypercomputeClusterRestTransport, "rest"), + ], +) +def test_hypercompute_cluster_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with 
mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (HypercomputeClusterClient, "grpc"), + (HypercomputeClusterAsyncClient, "grpc_asyncio"), + (HypercomputeClusterClient, "rest"), + ], +) +def test_hypercompute_cluster_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "hypercomputecluster.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://hypercomputecluster.googleapis.com" + ) + + +def test_hypercompute_cluster_client_get_transport_class(): + transport = HypercomputeClusterClient.get_transport_class() + available_transports = [ + transports.HypercomputeClusterGrpcTransport, + transports.HypercomputeClusterRestTransport, + ] + assert transport in available_transports + + transport = HypercomputeClusterClient.get_transport_class("grpc") + assert transport == transports.HypercomputeClusterGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + HypercomputeClusterClient, + transports.HypercomputeClusterGrpcTransport, + "grpc", + ), + ( + HypercomputeClusterAsyncClient, + transports.HypercomputeClusterGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + HypercomputeClusterClient, + transports.HypercomputeClusterRestTransport, + "rest", + ), + ], +) +@mock.patch.object( + HypercomputeClusterClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(HypercomputeClusterClient), +) +@mock.patch.object( + HypercomputeClusterAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(HypercomputeClusterAsyncClient), +) +def test_hypercompute_cluster_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(HypercomputeClusterClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(HypercomputeClusterClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
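+    # An explicit ``api_endpoint`` in ClientOptions is forwarded to the transport
+    # verbatim as ``host``, bypassing the endpoint-template/mTLS resolution
+    # exercised elsewhere. Illustrative usage (hypothetical endpoint) outside a test:
+    #
+    #     options = client_options.ClientOptions(api_endpoint="example.googleapis.com")
+    #     client = HypercomputeClusterClient(client_options=options)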
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
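+    # Only "never", "auto", and "always" are accepted; per the assertion below,
+    # any other value makes client construction fail with MutualTLSChannelError.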
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + HypercomputeClusterClient, + transports.HypercomputeClusterGrpcTransport, + "grpc", + "true", + ), + ( + HypercomputeClusterAsyncClient, + transports.HypercomputeClusterGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + HypercomputeClusterClient, + transports.HypercomputeClusterGrpcTransport, + "grpc", + "false", + ), + ( + HypercomputeClusterAsyncClient, + transports.HypercomputeClusterGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + HypercomputeClusterClient, + transports.HypercomputeClusterRestTransport, + "rest", + "true", + ), + ( + HypercomputeClusterClient, + transports.HypercomputeClusterRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + HypercomputeClusterClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(HypercomputeClusterClient), +) +@mock.patch.object( + HypercomputeClusterAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(HypercomputeClusterAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_hypercompute_cluster_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
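+    # Per the if/else below: with the env var set to "true" the provided cert
+    # source is honored and the endpoint switches to DEFAULT_MTLS_ENDPOINT; with
+    # "false" the cert source is dropped and the plain templated endpoint is used.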
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
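+    # With has_default_client_cert_source mocked to False, no certificate is
+    # available from ADC either, so the client falls back to the plain templated
+    # endpoint with no mTLS, regardless of the env var value.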
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [HypercomputeClusterClient, HypercomputeClusterAsyncClient] +) +@mock.patch.object( + HypercomputeClusterClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(HypercomputeClusterClient), +) +@mock.patch.object( + HypercomputeClusterAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(HypercomputeClusterAsyncClient), +) +def test_hypercompute_cluster_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "Unsupported". + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset. + test_cases = [ + ( + # With workloads present in config, mTLS is enabled. + { + "version": 1, + "cert_configs": { + "workload": { + "cert_path": "path/to/cert/file", + "key_path": "path/to/key/file", + } + }, + }, + mock_client_cert_source, + ), + ( + # With workloads not present in config, mTLS is disabled. 
+ { + "version": 1, + "cert_configs": {}, + }, + None, + ), + ] + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + for config_data, expected_cert_source in test_cases: + env = os.environ.copy() + env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", None) + with mock.patch.dict(os.environ, env, clear=True): + config_filename = "mock_certificate_config.json" + config_file_content = json.dumps(config_data) + m = mock.mock_open(read_data=config_file_content) + with mock.patch("builtins.open", m): + with mock.patch.dict( + os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename} + ): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source(options) + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is expected_cert_source + + # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset(empty). + test_cases = [ + ( + # With workloads present in config, mTLS is enabled. + { + "version": 1, + "cert_configs": { + "workload": { + "cert_path": "path/to/cert/file", + "key_path": "path/to/key/file", + } + }, + }, + mock_client_cert_source, + ), + ( + # With workloads not present in config, mTLS is disabled. + { + "version": 1, + "cert_configs": {}, + }, + None, + ), + ] + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + for config_data, expected_cert_source in test_cases: + env = os.environ.copy() + env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", "") + with mock.patch.dict(os.environ, env, clear=True): + config_filename = "mock_certificate_config.json" + config_file_content = json.dumps(config_data) + m = mock.mock_open(read_data=config_file_content) + with mock.patch("builtins.open", m): + with mock.patch.dict( + os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename} + ): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source(options) + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is expected_cert_source + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
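+    # Here GOOGLE_API_USE_MTLS_ENDPOINT is left at its default ("auto"); with
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE="true" and a default client certificate
+    # available, both the mTLS endpoint and the cert source are returned.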
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + +@pytest.mark.parametrize( + "client_class", [HypercomputeClusterClient, HypercomputeClusterAsyncClient] +) +@mock.patch.object( + HypercomputeClusterClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(HypercomputeClusterClient), +) +@mock.patch.object( + HypercomputeClusterAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(HypercomputeClusterAsyncClient), +) +def test_hypercompute_cluster_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = HypercomputeClusterClient._DEFAULT_UNIVERSE + default_endpoint = HypercomputeClusterClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = HypercomputeClusterClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
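+    # ``universe_domain`` only exists on newer google-api-core ClientOptions,
+    # hence the hasattr guard below; on older versions the client simply falls
+    # back to the default universe and endpoint.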
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + HypercomputeClusterClient, + transports.HypercomputeClusterGrpcTransport, + "grpc", + ), + ( + HypercomputeClusterAsyncClient, + transports.HypercomputeClusterGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + HypercomputeClusterClient, + transports.HypercomputeClusterRestTransport, + "rest", + ), + ], +) +def test_hypercompute_cluster_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + HypercomputeClusterClient, + transports.HypercomputeClusterGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + HypercomputeClusterAsyncClient, + transports.HypercomputeClusterGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + HypercomputeClusterClient, + transports.HypercomputeClusterRestTransport, + "rest", + None, + ), + ], +) +def test_hypercompute_cluster_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
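+    # Note in the expected call below that ``credentials`` stays None while
+    # ``credentials_file`` is forwarded; the transport is responsible for
+    # loading credentials from the file.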
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_hypercompute_cluster_client_client_options_from_dict(): + with mock.patch( + "google.cloud.hypercomputecluster_v1.services.hypercompute_cluster.transports.HypercomputeClusterGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = HypercomputeClusterClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + HypercomputeClusterClient, + transports.HypercomputeClusterGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + HypercomputeClusterAsyncClient, + transports.HypercomputeClusterGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_hypercompute_cluster_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
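+    # load_credentials_from_file is mocked so that create_channel can be shown
+    # to receive ``file_creds`` (the file-based credentials) rather than the
+    # ADC credentials returned by google.auth.default.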
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object(grpc_helpers, "create_channel") as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "hypercomputecluster.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="hypercomputecluster.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + hypercompute_cluster.ListClustersRequest, + dict, + ], +) +def test_list_clusters(request_type, transport: str = "grpc"): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = hypercompute_cluster.ListClustersResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], + ) + response = client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = hypercompute_cluster.ListClustersRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersPager) + assert response.next_page_token == "next_page_token_value" + assert response.unreachable == ["unreachable_value"] + + +def test_list_clusters_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = hypercompute_cluster.ListClustersRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_clusters(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == hypercompute_cluster.ListClustersRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", + ) + + +def test_list_clusters_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_clusters in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc + request = {} + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_clusters_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_clusters + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_clusters + ] = mock_rpc + + request = {} + await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_clusters_async( + transport: str = "grpc_asyncio", + request_type=hypercompute_cluster.ListClustersRequest, +): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + hypercompute_cluster.ListClustersResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], + ) + ) + response = await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = hypercompute_cluster.ListClustersRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersAsyncPager) + assert response.next_page_token == "next_page_token_value" + assert response.unreachable == ["unreachable_value"] + + +@pytest.mark.asyncio +async def test_list_clusters_async_from_dict(): + await test_list_clusters_async(request_type=dict) + + +def test_list_clusters_field_headers(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = hypercompute_cluster.ListClustersRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value = hypercompute_cluster.ListClustersResponse() + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_clusters_field_headers_async(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = hypercompute_cluster.ListClustersRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + hypercompute_cluster.ListClustersResponse() + ) + await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_clusters_flattened(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = hypercompute_cluster.ListClustersResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
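+        # The flattened ``parent`` kwarg is folded into a ListClustersRequest
+        # under the hood, so the assertion inspects ``args[0].parent``.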
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_clusters_flattened_error(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + hypercompute_cluster.ListClustersRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_async(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = hypercompute_cluster.ListClustersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + hypercompute_cluster.ListClustersResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_clusters( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_clusters( + hypercompute_cluster.ListClustersRequest(), + parent="parent_value", + ) + + +def test_list_clusters_pager(transport_name: str = "grpc"): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Set the response to a series of pages. 
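+        # The pager re-issues the RPC whenever a page carries a non-empty
+        # next_page_token; the trailing RuntimeError would only fire if a
+        # spurious extra call were made after the final (token-less) page.
+        # Illustrative usage against a real service (hypothetical parent value):
+        #
+        #     for cluster in client.list_clusters(parent="projects/p/locations/l"):
+        #         print(cluster.name)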
+        call.side_effect = (
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                ],
+                next_page_token="abc",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[],
+                next_page_token="def",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                ],
+                next_page_token="ghi",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        expected_metadata = ()
+        retry = retries.Retry()
+        timeout = 5
+        expected_metadata = tuple(expected_metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_clusters(request={}, retry=retry, timeout=timeout)
+
+        assert pager._metadata == expected_metadata
+        assert pager._retry == retry
+        assert pager._timeout == timeout
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, hypercompute_cluster.Cluster) for i in results)
+
+
+def test_list_clusters_pages(transport_name: str = "grpc"):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                ],
+                next_page_token="abc",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[],
+                next_page_token="def",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                ],
+                next_page_token="ghi",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_clusters(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_clusters_async_pager():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_clusters), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                ],
+                next_page_token="abc",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[],
+                next_page_token="def",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                ],
+                next_page_token="ghi",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_clusters(
+            request={},
+        )
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:  # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, hypercompute_cluster.Cluster) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_clusters_async_pages():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_clusters), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                ],
+                next_page_token="abc",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[],
+                next_page_token="def",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                ],
+                next_page_token="ghi",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in (  # pragma: no branch
+            await client.list_clusters(request={})
+        ).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        hypercompute_cluster.GetClusterRequest,
+        dict,
+    ],
+)
+def test_get_cluster(request_type, transport: str = "grpc"):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = hypercompute_cluster.Cluster(
+            name="name_value",
+            description="description_value",
+            reconciling=True,
+        )
+        response = client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        request = hypercompute_cluster.GetClusterRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, hypercompute_cluster.Cluster)
+    assert response.name == "name_value"
+    assert response.description == "description_value"
+    assert response.reconciling is True
+
+
+def test_get_cluster_non_empty_request_with_auto_populated_field():
+    # This test is a coverage failsafe to make sure that UUID4 fields are
+    # automatically populated, according to AIP-4235, with non-empty requests.
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="grpc",
+    )
+
+    # Populate all string fields in the request which are not UUID4
+    # since we want to check that UUID4 are populated automatically
+    # if they meet the requirements of AIP 4235.
+    request = hypercompute_cluster.GetClusterRequest(
+        name="name_value",
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
+        call.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client.get_cluster(request=request)
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == hypercompute_cluster.GetClusterRequest(
+            name="name_value",
+        )
+
+
+def test_get_cluster_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = HypercomputeClusterClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="grpc",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._transport.get_cluster in client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc
+        request = {}
+        client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        client.get_cluster(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_async_use_cached_wrapped_rpc(
+    transport: str = "grpc_asyncio",
+):
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+        client = HypercomputeClusterAsyncClient(
+            credentials=async_anonymous_credentials(),
+            transport=transport,
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert (
+            client._client._transport.get_cluster
+            in client._client._transport._wrapped_methods
+        )
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.AsyncMock()
+        mock_rpc.return_value = mock.Mock()
+        client._client._transport._wrapped_methods[
+            client._client._transport.get_cluster
+        ] = mock_rpc
+
+        request = {}
+        await client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        await client.get_cluster(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_async(
+    transport: str = "grpc_asyncio", request_type=hypercompute_cluster.GetClusterRequest
+):
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            hypercompute_cluster.Cluster(
+                name="name_value",
+                description="description_value",
+                reconciling=True,
+            )
+        )
+        response = await client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = hypercompute_cluster.GetClusterRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, hypercompute_cluster.Cluster)
+    assert response.name == "name_value"
+    assert response.description == "description_value"
+    assert response.reconciling is True
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_async_from_dict():
+    await test_get_cluster_async(request_type=dict)
+
+
+def test_get_cluster_field_headers():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = hypercompute_cluster.GetClusterRequest()
+
+    request.name = "name_value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
+        call.return_value = hypercompute_cluster.Cluster()
+        client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=name_value",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_field_headers_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = hypercompute_cluster.GetClusterRequest()
+
+    request.name = "name_value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            hypercompute_cluster.Cluster()
+        )
+        await client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=name_value",
+    ) in kw["metadata"]
+
+
+def test_get_cluster_flattened():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = hypercompute_cluster.Cluster()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_cluster(
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = "name_value"
+        assert arg == mock_val
+
+
+def test_get_cluster_flattened_error():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_cluster(
+            hypercompute_cluster.GetClusterRequest(),
+            name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_flattened_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            hypercompute_cluster.Cluster()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_cluster(
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = "name_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_flattened_error_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_cluster(
+            hypercompute_cluster.GetClusterRequest(),
+            name="name_value",
+        )
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        hypercompute_cluster.CreateClusterRequest,
+        dict,
+    ],
+)
+def test_create_cluster(request_type, transport: str = "grpc"):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/spam")
+        response = client.create_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        request = hypercompute_cluster.CreateClusterRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_create_cluster_non_empty_request_with_auto_populated_field():
+    # This test is a coverage failsafe to make sure that UUID4 fields are
+    # automatically populated, according to AIP-4235, with non-empty requests.
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="grpc",
+    )
+
+    # Populate all string fields in the request which are not UUID4
+    # since we want to check that UUID4 are populated automatically
+    # if they meet the requirements of AIP 4235.
+    request = hypercompute_cluster.CreateClusterRequest(
+        parent="parent_value",
+        cluster_id="cluster_id_value",
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
+        call.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client.create_cluster(request=request)
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == hypercompute_cluster.CreateClusterRequest(
+            parent="parent_value",
+            cluster_id="cluster_id_value",
+        )
+
+
+def test_create_cluster_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = HypercomputeClusterClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="grpc",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._transport.create_cluster in client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc
+        request = {}
+        client.create_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        # Operation methods call wrapper_fn to build a cached
+        # client._transport.operations_client instance on first rpc call.
+        # Subsequent calls should use the cached wrapper
+        wrapper_fn.reset_mock()
+
+        client.create_cluster(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_cluster_async_use_cached_wrapped_rpc(
+    transport: str = "grpc_asyncio",
+):
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+        client = HypercomputeClusterAsyncClient(
+            credentials=async_anonymous_credentials(),
+            transport=transport,
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert (
+            client._client._transport.create_cluster
+            in client._client._transport._wrapped_methods
+        )
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.AsyncMock()
+        mock_rpc.return_value = mock.Mock()
+        client._client._transport._wrapped_methods[
+            client._client._transport.create_cluster
+        ] = mock_rpc
+
+        request = {}
+        await client.create_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        # Operation methods call wrapper_fn to build a cached
+        # client._transport.operations_client instance on first rpc call.
+        # Subsequent calls should use the cached wrapper
+        wrapper_fn.reset_mock()
+
+        await client.create_cluster(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_cluster_async(
+    transport: str = "grpc_asyncio",
+    request_type=hypercompute_cluster.CreateClusterRequest,
+):
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        response = await client.create_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = hypercompute_cluster.CreateClusterRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_create_cluster_async_from_dict():
+    await test_create_cluster_async(request_type=dict)
+
+
+def test_create_cluster_field_headers():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = hypercompute_cluster.CreateClusterRequest()
+
+    request.parent = "parent_value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
+        call.return_value = operations_pb2.Operation(name="operations/op")
+        client.create_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "parent=parent_value",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_cluster_field_headers_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = hypercompute_cluster.CreateClusterRequest()
+
+    request.parent = "parent_value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/op")
+        )
+        await client.create_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "parent=parent_value",
+    ) in kw["metadata"]
+
+
+def test_create_cluster_flattened():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_cluster(
+            parent="parent_value",
+            cluster=hypercompute_cluster.Cluster(name="name_value"),
+            cluster_id="cluster_id_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = "parent_value"
+        assert arg == mock_val
+        arg = args[0].cluster
+        mock_val = hypercompute_cluster.Cluster(name="name_value")
+        assert arg == mock_val
+        arg = args[0].cluster_id
+        mock_val = "cluster_id_value"
+        assert arg == mock_val
+
+
+def test_create_cluster_flattened_error():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_cluster(
+            hypercompute_cluster.CreateClusterRequest(),
+            parent="parent_value",
+            cluster=hypercompute_cluster.Cluster(name="name_value"),
+            cluster_id="cluster_id_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_cluster_flattened_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_cluster(
+            parent="parent_value",
+            cluster=hypercompute_cluster.Cluster(name="name_value"),
+            cluster_id="cluster_id_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = "parent_value"
+        assert arg == mock_val
+        arg = args[0].cluster
+        mock_val = hypercompute_cluster.Cluster(name="name_value")
+        assert arg == mock_val
+        arg = args[0].cluster_id
+        mock_val = "cluster_id_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_cluster_flattened_error_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_cluster(
+            hypercompute_cluster.CreateClusterRequest(),
+            parent="parent_value",
+            cluster=hypercompute_cluster.Cluster(name="name_value"),
+            cluster_id="cluster_id_value",
+        )
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        hypercompute_cluster.UpdateClusterRequest,
+        dict,
+    ],
+)
+def test_update_cluster(request_type, transport: str = "grpc"):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/spam")
+        response = client.update_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        request = hypercompute_cluster.UpdateClusterRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_update_cluster_non_empty_request_with_auto_populated_field():
+    # This test is a coverage failsafe to make sure that UUID4 fields are
+    # automatically populated, according to AIP-4235, with non-empty requests.
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="grpc",
+    )
+
+    # Populate all string fields in the request which are not UUID4
+    # since we want to check that UUID4 are populated automatically
+    # if they meet the requirements of AIP 4235.
+    request = hypercompute_cluster.UpdateClusterRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
+        call.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client.update_cluster(request=request)
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == hypercompute_cluster.UpdateClusterRequest()
+
+
+def test_update_cluster_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = HypercomputeClusterClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="grpc",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._transport.update_cluster in client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc
+        request = {}
+        client.update_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        # Operation methods call wrapper_fn to build a cached
+        # client._transport.operations_client instance on first rpc call.
+        # Subsequent calls should use the cached wrapper
+        wrapper_fn.reset_mock()
+
+        client.update_cluster(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_async_use_cached_wrapped_rpc(
+    transport: str = "grpc_asyncio",
+):
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+        client = HypercomputeClusterAsyncClient(
+            credentials=async_anonymous_credentials(),
+            transport=transport,
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert (
+            client._client._transport.update_cluster
+            in client._client._transport._wrapped_methods
+        )
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.AsyncMock()
+        mock_rpc.return_value = mock.Mock()
+        client._client._transport._wrapped_methods[
+            client._client._transport.update_cluster
+        ] = mock_rpc
+
+        request = {}
+        await client.update_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        # Operation methods call wrapper_fn to build a cached
+        # client._transport.operations_client instance on first rpc call.
+        # Subsequent calls should use the cached wrapper
+        wrapper_fn.reset_mock()
+
+        await client.update_cluster(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_async(
+    transport: str = "grpc_asyncio",
+    request_type=hypercompute_cluster.UpdateClusterRequest,
+):
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        response = await client.update_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = hypercompute_cluster.UpdateClusterRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_async_from_dict():
+    await test_update_cluster_async(request_type=dict)
+
+
+def test_update_cluster_field_headers():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = hypercompute_cluster.UpdateClusterRequest()
+
+    request.cluster.name = "name_value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
+        call.return_value = operations_pb2.Operation(name="operations/op")
+        client.update_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "cluster.name=name_value",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_field_headers_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = hypercompute_cluster.UpdateClusterRequest()
+
+    request.cluster.name = "name_value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/op")
+        )
+        await client.update_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "cluster.name=name_value",
+    ) in kw["metadata"]
+
+
+def test_update_cluster_flattened():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_cluster(
+            cluster=hypercompute_cluster.Cluster(name="name_value"),
+            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].cluster
+        mock_val = hypercompute_cluster.Cluster(name="name_value")
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+        assert arg == mock_val
+
+
+def test_update_cluster_flattened_error():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_cluster(
+            hypercompute_cluster.UpdateClusterRequest(),
+            cluster=hypercompute_cluster.Cluster(name="name_value"),
+            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_flattened_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_cluster(
+            cluster=hypercompute_cluster.Cluster(name="name_value"),
+            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].cluster
+        mock_val = hypercompute_cluster.Cluster(name="name_value")
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_flattened_error_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_cluster(
+            hypercompute_cluster.UpdateClusterRequest(),
+            cluster=hypercompute_cluster.Cluster(name="name_value"),
+            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+        )
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        hypercompute_cluster.DeleteClusterRequest,
+        dict,
+    ],
+)
+def test_delete_cluster(request_type, transport: str = "grpc"):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/spam")
+        response = client.delete_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        request = hypercompute_cluster.DeleteClusterRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_delete_cluster_non_empty_request_with_auto_populated_field():
+    # This test is a coverage failsafe to make sure that UUID4 fields are
+    # automatically populated, according to AIP-4235, with non-empty requests.
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="grpc",
+    )
+
+    # Populate all string fields in the request which are not UUID4
+    # since we want to check that UUID4 are populated automatically
+    # if they meet the requirements of AIP 4235.
+    request = hypercompute_cluster.DeleteClusterRequest(
+        name="name_value",
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
+        call.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client.delete_cluster(request=request)
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == hypercompute_cluster.DeleteClusterRequest(
+            name="name_value",
+        )
+
+
+def test_delete_cluster_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = HypercomputeClusterClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="grpc",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._transport.delete_cluster in client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc
+        request = {}
+        client.delete_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        # Operation methods call wrapper_fn to build a cached
+        # client._transport.operations_client instance on first rpc call.
+        # Subsequent calls should use the cached wrapper
+        wrapper_fn.reset_mock()
+
+        client.delete_cluster(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_async_use_cached_wrapped_rpc(
+    transport: str = "grpc_asyncio",
+):
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+        client = HypercomputeClusterAsyncClient(
+            credentials=async_anonymous_credentials(),
+            transport=transport,
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert (
+            client._client._transport.delete_cluster
+            in client._client._transport._wrapped_methods
+        )
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.AsyncMock()
+        mock_rpc.return_value = mock.Mock()
+        client._client._transport._wrapped_methods[
+            client._client._transport.delete_cluster
+        ] = mock_rpc
+
+        request = {}
+        await client.delete_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        # Operation methods call wrapper_fn to build a cached
+        # client._transport.operations_client instance on first rpc call.
+        # Subsequent calls should use the cached wrapper
+        wrapper_fn.reset_mock()
+
+        await client.delete_cluster(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_async(
+    transport: str = "grpc_asyncio",
+    request_type=hypercompute_cluster.DeleteClusterRequest,
+):
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        response = await client.delete_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = hypercompute_cluster.DeleteClusterRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_async_from_dict():
+    await test_delete_cluster_async(request_type=dict)
+
+
+def test_delete_cluster_field_headers():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = hypercompute_cluster.DeleteClusterRequest()
+
+    request.name = "name_value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
+        call.return_value = operations_pb2.Operation(name="operations/op")
+        client.delete_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=name_value",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_field_headers_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = hypercompute_cluster.DeleteClusterRequest()
+
+    request.name = "name_value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/op")
+        )
+        await client.delete_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=name_value",
+    ) in kw["metadata"]
+
+
+def test_delete_cluster_flattened():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_cluster(
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = "name_value"
+        assert arg == mock_val
+
+
+def test_delete_cluster_flattened_error():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_cluster(
+            hypercompute_cluster.DeleteClusterRequest(),
+            name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_flattened_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_cluster(
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = "name_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_flattened_error_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_cluster(
+            hypercompute_cluster.DeleteClusterRequest(),
+            name="name_value",
+        )
+
+
+def test_list_clusters_rest_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = HypercomputeClusterClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="rest",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._transport.list_clusters in client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc
+
+        request = {}
+        client.list_clusters(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        client.list_clusters(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+def test_list_clusters_rest_required_fields(
+    request_type=hypercompute_cluster.ListClustersRequest,
+):
+    transport_class = transports.HypercomputeClusterRestTransport
+
+    request_init = {}
+    request_init["parent"] = ""
+    request = request_type(**request_init)
+    pb_request = request_type.pb(request)
+    jsonified_request = json.loads(
+        json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+    )
+
+    # verify fields with default values are dropped
+
+    unset_fields = transport_class(
+        credentials=ga_credentials.AnonymousCredentials()
+    ).list_clusters._get_unset_required_fields(jsonified_request)
+    jsonified_request.update(unset_fields)
+
+    # verify required fields with default values are now present
+
+    jsonified_request["parent"] = "parent_value"
+
+    unset_fields = transport_class(
+        credentials=ga_credentials.AnonymousCredentials()
+    ).list_clusters._get_unset_required_fields(jsonified_request)
+    # Check that path parameters and body parameters are not mixing in.
+    assert not set(unset_fields) - set(
+        (
+            "filter",
+            "order_by",
+            "page_size",
+            "page_token",
+        )
+    )
+    jsonified_request.update(unset_fields)
+
+    # verify required fields with non-default values are left alone
+    assert "parent" in jsonified_request
+    assert jsonified_request["parent"] == "parent_value"
+
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+    request = request_type(**request_init)
+
+    # Designate an appropriate value for the returned response.
+    return_value = hypercompute_cluster.ListClustersResponse()
+    # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = hypercompute_cluster.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_clusters(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_clusters_rest_unset_required_fields(): + transport = transports.HypercomputeClusterRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_clusters._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_clusters_rest_flattened(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = hypercompute_cluster.ListClustersResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = hypercompute_cluster.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_clusters(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/clusters" % client.transport._host, + args[1], + ) + + +def test_list_clusters_rest_flattened_error(transport: str = "rest"): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_clusters(
+            hypercompute_cluster.ListClustersRequest(),
+            parent="parent_value",
+        )
+
+
+def test_list_clusters_rest_pager(transport: str = "rest"):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, "request") as req:
+        # TODO(kbandes): remove this mock unless there's a good reason for it.
+        # with mock.patch.object(path_template, 'transcode') as transcode:
+        # Set the response as a series of pages
+        response = (
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                ],
+                next_page_token="abc",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[],
+                next_page_token="def",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                ],
+                next_page_token="ghi",
+            ),
+            hypercompute_cluster.ListClustersResponse(
+                clusters=[
+                    hypercompute_cluster.Cluster(),
+                    hypercompute_cluster.Cluster(),
+                ],
+            ),
+        )
+        # Two responses for two calls
+        response = response + response
+
+        # Wrap the values into proper Response objs
+        response = tuple(
+            hypercompute_cluster.ListClustersResponse.to_json(x) for x in response
+        )
+        return_values = tuple(Response() for i in response)
+        for return_val, response_val in zip(return_values, response):
+            return_val._content = response_val.encode("UTF-8")
+            return_val.status_code = 200
+        req.side_effect = return_values
+
+        sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+        pager = client.list_clusters(request=sample_request)
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, hypercompute_cluster.Cluster) for i in results)
+
+        pages = list(client.list_clusters(request=sample_request).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_get_cluster_rest_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = HypercomputeClusterClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="rest",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._transport.get_cluster in client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc
+
+        request = {}
+        client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1 + + client.get_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_cluster_rest_required_fields( + request_type=hypercompute_cluster.GetClusterRequest, +): + transport_class = transports.HypercomputeClusterRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = hypercompute_cluster.Cluster() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = hypercompute_cluster.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_cluster_rest_unset_required_fields(): + transport = transports.HypercomputeClusterRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_cluster_rest_flattened(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
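+        # (The concrete field values are irrelevant here; the test only checks
+        # that the flattened kwargs below are transcoded into the expected URL,
+        # e.g. .../v1/projects/sample1/locations/sample2/clusters/sample3.)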
+ return_value = hypercompute_cluster.Cluster() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/locations/sample2/clusters/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = hypercompute_cluster.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/clusters/*}" % client.transport._host, + args[1], + ) + + +def test_get_cluster_rest_flattened_error(transport: str = "rest"): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_cluster( + hypercompute_cluster.GetClusterRequest(), + name="name_value", + ) + + +def test_create_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc + + request = {} + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
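+        # (For operation-returning methods the mocked result must expose a
+        # string `.name` (set to "foo" above), because the client builds a
+        # long-running-operation future around the returned operation name.)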
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_cluster_rest_required_fields( + request_type=hypercompute_cluster.CreateClusterRequest, +): + transport_class = transports.HypercomputeClusterRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["cluster_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "clusterId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == request_init["cluster_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["clusterId"] = "cluster_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "cluster_id", + "request_id", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == "cluster_id_value" + + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
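+            # (Unmocked, transcode() would return something shaped like
+            #     {"uri": "/v1/projects/.../clusters", "method": "post",
+            #      "body": <Cluster pb>, "query_params": {"clusterId": ...}}
+            # per the http rule; pinning it to a bare uri keeps the assertion
+            # below focused on the query parameters alone.)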
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_cluster(request) + + expected_params = [ + ( + "clusterId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_cluster_rest_unset_required_fields(): + transport = transports.HypercomputeClusterRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "clusterId", + "requestId", + ) + ) + & set( + ( + "parent", + "clusterId", + "cluster", + ) + ) + ) + + +def test_create_cluster_rest_flattened(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + cluster=hypercompute_cluster.Cluster(name="name_value"), + cluster_id="cluster_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/clusters" % client.transport._host, + args[1], + ) + + +def test_create_cluster_rest_flattened_error(transport: str = "rest"): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_cluster( + hypercompute_cluster.CreateClusterRequest(), + parent="parent_value", + cluster=hypercompute_cluster.Cluster(name="name_value"), + cluster_id="cluster_id_value", + ) + + +def test_update_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc + + request = {} + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_cluster_rest_required_fields( + request_type=hypercompute_cluster.UpdateClusterRequest, +): + transport_class = transports.HypercomputeClusterRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "request_id", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_cluster_rest_unset_required_fields(): + transport = transports.HypercomputeClusterRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "requestId", + "updateMask", + ) + ) + & set(("cluster",)) + ) + + +def test_update_cluster_rest_flattened(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "cluster": {"name": "projects/sample1/locations/sample2/clusters/sample3"} + } + + # get truthy value for each flattened field + mock_args = dict( + cluster=hypercompute_cluster.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{cluster.name=projects/*/locations/*/clusters/*}" + % client.transport._host, + args[1], + ) + + +def test_update_cluster_rest_flattened_error(transport: str = "rest"): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_cluster( + hypercompute_cluster.UpdateClusterRequest(), + cluster=hypercompute_cluster.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc + + request = {} + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_cluster_rest_required_fields( + request_type=hypercompute_cluster.DeleteClusterRequest, +): + transport_class = transports.HypercomputeClusterRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("request_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_cluster_rest_unset_required_fields(): + transport = transports.HypercomputeClusterRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(("requestId",)) & set(("name",))) + + +def test_delete_cluster_rest_flattened(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/locations/sample2/clusters/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/clusters/*}" % client.transport._host, + args[1], + ) + + +def test_delete_cluster_rest_flattened_error(transport: str = "rest"): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + hypercompute_cluster.DeleteClusterRequest(), + name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.HypercomputeClusterGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
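+    # (A transport instance is already bound to its own credentials/channel,
+    # so a client-level credential source would have to be silently ignored;
+    # the client raises instead of guessing which one the caller meant.)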
+ transport = transports.HypercomputeClusterGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = HypercomputeClusterClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.HypercomputeClusterGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = HypercomputeClusterClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = HypercomputeClusterClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.HypercomputeClusterGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = HypercomputeClusterClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.HypercomputeClusterGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = HypercomputeClusterClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.HypercomputeClusterGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.HypercomputeClusterGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.HypercomputeClusterGrpcTransport, + transports.HypercomputeClusterGrpcAsyncIOTransport, + transports.HypercomputeClusterRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_grpc(): + transport = HypercomputeClusterClient.get_transport_class("grpc")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "grpc" + + +def test_initialize_client_w_grpc(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_clusters_empty_call_grpc(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value = hypercompute_cluster.ListClustersResponse() + client.list_clusters(request=None) + + # Establish that the underlying stub method was called. 
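+        # (With request=None and no flattened fields, the client constructs an
+        # empty ListClustersRequest() on its own, which is exactly what the
+        # assertion on args[0] below verifies.)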
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.ListClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_cluster_empty_call_grpc(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + call.return_value = hypercompute_cluster.Cluster() + client.get_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.GetClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_cluster_empty_call_grpc(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.CreateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_cluster_empty_call_grpc(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.UpdateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_cluster_empty_call_grpc(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_cluster(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.DeleteClusterRequest() + + assert args[0] == request_msg + + +def test_transport_kind_grpc_asyncio(): + transport = HypercomputeClusterAsyncClient.get_transport_class("grpc_asyncio")( + credentials=async_anonymous_credentials() + ) + assert transport.kind == "grpc_asyncio" + + +def test_initialize_client_w_grpc_asyncio(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_clusters_empty_call_grpc_asyncio(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + hypercompute_cluster.ListClustersResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], + ) + ) + await client.list_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.ListClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_cluster_empty_call_grpc_asyncio(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + hypercompute_cluster.Cluster( + name="name_value", + description="description_value", + reconciling=True, + ) + ) + await client.get_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.GetClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_cluster_empty_call_grpc_asyncio(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.CreateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_update_cluster_empty_call_grpc_asyncio(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.UpdateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_cluster_empty_call_grpc_asyncio(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.delete_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.DeleteClusterRequest() + + assert args[0] == request_msg + + +def test_transport_kind_rest(): + transport = HypercomputeClusterClient.get_transport_class("rest")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "rest" + + +def test_list_clusters_rest_bad_request( + request_type=hypercompute_cluster.ListClustersRequest, +): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_clusters(request) + + +@pytest.mark.parametrize( + "request_type", + [ + hypercompute_cluster.ListClustersRequest, + dict, + ], +) +def test_list_clusters_rest_call_success(request_type): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
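+        # (These tests fake the response with a bare mock.Mock exposing
+        # `.content`, which is all the REST transport reads; earlier tests
+        # built a real requests.Response and set `_content` instead. Either
+        # shape satisfies the assertions.)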
+ return_value = hypercompute_cluster.ListClustersResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = hypercompute_cluster.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_clusters(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersPager) + assert response.next_page_token == "next_page_token_value" + assert response.unreachable == ["unreachable_value"] + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_clusters_rest_interceptors(null_interceptor): + transport = transports.HypercomputeClusterRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.HypercomputeClusterRestInterceptor(), + ) + client = HypercomputeClusterClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.HypercomputeClusterRestInterceptor, "post_list_clusters" + ) as post, mock.patch.object( + transports.HypercomputeClusterRestInterceptor, + "post_list_clusters_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.HypercomputeClusterRestInterceptor, "pre_list_clusters" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = hypercompute_cluster.ListClustersRequest.pb( + hypercompute_cluster.ListClustersRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = hypercompute_cluster.ListClustersResponse.to_json( + hypercompute_cluster.ListClustersResponse() + ) + req.return_value.content = return_value + + request = hypercompute_cluster.ListClustersRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = hypercompute_cluster.ListClustersResponse() + post_with_metadata.return_value = ( + hypercompute_cluster.ListClustersResponse(), + metadata, + ) + + client.list_clusters( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_cluster_rest_bad_request( + request_type=hypercompute_cluster.GetClusterRequest, +): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
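+    # (The REST transport maps HTTP status codes onto google.api_core
+    # exceptions, so the mocked 400 below is expected to surface as
+    # core_exceptions.BadRequest; a 404 would surface as NotFound, and so on.)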
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + hypercompute_cluster.GetClusterRequest, + dict, + ], +) +def test_get_cluster_rest_call_success(request_type): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = hypercompute_cluster.Cluster( + name="name_value", + description="description_value", + reconciling=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = hypercompute_cluster.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_cluster(request) + + # Establish that the response is the type that we expect. 
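+    # (The proto-plus Cluster is rebuilt from the JSON payload injected above,
+    # so these field checks exercise the REST deserialization path end to end.)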
+ assert isinstance(response, hypercompute_cluster.Cluster) + assert response.name == "name_value" + assert response.description == "description_value" + assert response.reconciling is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_cluster_rest_interceptors(null_interceptor): + transport = transports.HypercomputeClusterRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.HypercomputeClusterRestInterceptor(), + ) + client = HypercomputeClusterClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.HypercomputeClusterRestInterceptor, "post_get_cluster" + ) as post, mock.patch.object( + transports.HypercomputeClusterRestInterceptor, "post_get_cluster_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.HypercomputeClusterRestInterceptor, "pre_get_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = hypercompute_cluster.GetClusterRequest.pb( + hypercompute_cluster.GetClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = hypercompute_cluster.Cluster.to_json( + hypercompute_cluster.Cluster() + ) + req.return_value.content = return_value + + request = hypercompute_cluster.GetClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = hypercompute_cluster.Cluster() + post_with_metadata.return_value = hypercompute_cluster.Cluster(), metadata + + client.get_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_create_cluster_rest_bad_request( + request_type=hypercompute_cluster.CreateClusterRequest, +): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + hypercompute_cluster.CreateClusterRequest, + dict, + ], +) +def test_create_cluster_rest_call_success(request_type): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["cluster"] = { + "name": "name_value", + "description": "description_value", + "labels": {}, + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "reconciling": True, + "network_resources": {}, + "storage_resources": {}, + "compute_resources": {}, + "orchestrator": { + "slurm": { + "login_nodes": { + "count": 553, + "zone": "zone_value", + "machine_type": "machine_type_value", + "startup_script": "startup_script_value", + "enable_os_login": True, + "enable_public_ips": True, + "labels": {}, + "storage_configs": [ + {"id": "id_value", "local_mount": "local_mount_value"} + ], + "instances": [{"instance": "instance_value"}], + "boot_disk": {"type_": "type__value", "size_gb": 739}, + }, + "node_sets": [ + { + "compute_instance": { + "startup_script": "startup_script_value", + "labels": {}, + "boot_disk": {}, + }, + "id": "id_value", + "compute_id": "compute_id_value", + "storage_configs": {}, + "static_node_count": 1813, + "max_dynamic_node_count": 2327, + } + ], + "partitions": [ + { + "id": "id_value", + "node_set_ids": ["node_set_ids_value1", "node_set_ids_value2"], + } + ], + "default_partition": "default_partition_value", + "prolog_bash_scripts": [ + "prolog_bash_scripts_value1", + "prolog_bash_scripts_value2", + ], + "epilog_bash_scripts": [ + "epilog_bash_scripts_value1", + "epilog_bash_scripts_value2", + ], + } + }, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = hypercompute_cluster.CreateClusterRequest.meta.fields["cluster"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
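+        # (proto-plus message classes keep their schema under `.meta.fields`,
+        # while raw protobuf classes expose a DESCRIPTOR; the hasattr check
+        # below tells the two apart so either kind of runtime dependency works.)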
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_cluster(request) + + # Establish that the response is the type that we expect. 
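+    # (No isinstance check is emitted for long-running methods; the call above
+    # returns an operation future, and real callers would typically block on
+    # it with `response.result()` to obtain the finished Cluster.)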
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_cluster_rest_interceptors(null_interceptor): + transport = transports.HypercomputeClusterRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.HypercomputeClusterRestInterceptor(), + ) + client = HypercomputeClusterClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.HypercomputeClusterRestInterceptor, "post_create_cluster" + ) as post, mock.patch.object( + transports.HypercomputeClusterRestInterceptor, + "post_create_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.HypercomputeClusterRestInterceptor, "pre_create_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = hypercompute_cluster.CreateClusterRequest.pb( + hypercompute_cluster.CreateClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = hypercompute_cluster.CreateClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.create_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_update_cluster_rest_bad_request( + request_type=hypercompute_cluster.UpdateClusterRequest, +): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "cluster": {"name": "projects/sample1/locations/sample2/clusters/sample3"} + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + hypercompute_cluster.UpdateClusterRequest, + dict, + ], +) +def test_update_cluster_rest_call_success(request_type): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "cluster": {"name": "projects/sample1/locations/sample2/clusters/sample3"} + } + request_init["cluster"] = { + "name": "projects/sample1/locations/sample2/clusters/sample3", + "description": "description_value", + "labels": {}, + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "reconciling": True, + "network_resources": {}, + "storage_resources": {}, + "compute_resources": {}, + "orchestrator": { + "slurm": { + "login_nodes": { + "count": 553, + "zone": "zone_value", + "machine_type": "machine_type_value", + "startup_script": "startup_script_value", + "enable_os_login": True, + "enable_public_ips": True, + "labels": {}, + "storage_configs": [ + {"id": "id_value", "local_mount": "local_mount_value"} + ], + "instances": [{"instance": "instance_value"}], + "boot_disk": {"type_": "type__value", "size_gb": 739}, + }, + "node_sets": [ + { + "compute_instance": { + "startup_script": "startup_script_value", + "labels": {}, + "boot_disk": {}, + }, + "id": "id_value", + "compute_id": "compute_id_value", + "storage_configs": {}, + "static_node_count": 1813, + "max_dynamic_node_count": 2327, + } + ], + "partitions": [ + { + "id": "id_value", + "node_set_ids": ["node_set_ids_value1", "node_set_ids_value2"], + } + ], + "default_partition": "default_partition_value", + "prolog_bash_scripts": [ + "prolog_bash_scripts_value1", + "prolog_bash_scripts_value2", + ], + "epilog_bash_scripts": [ + "epilog_bash_scripts_value1", + "epilog_bash_scripts_value2", + ], + } + }, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = hypercompute_cluster.UpdateClusterRequest.meta.fields["cluster"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_cluster(request) + + # Establish that the response is the type that we expect. 
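+    # update_cluster is a long-running method, so the mocked value is an
+    # Operation. In application code the returned future would be resolved
+    # with, e.g.:
+    #     operation = client.update_cluster(request=request)
+    #     cluster = operation.result()  # blocks until the LRO completes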
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_cluster_rest_interceptors(null_interceptor): + transport = transports.HypercomputeClusterRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.HypercomputeClusterRestInterceptor(), + ) + client = HypercomputeClusterClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.HypercomputeClusterRestInterceptor, "post_update_cluster" + ) as post, mock.patch.object( + transports.HypercomputeClusterRestInterceptor, + "post_update_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.HypercomputeClusterRestInterceptor, "pre_update_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = hypercompute_cluster.UpdateClusterRequest.pb( + hypercompute_cluster.UpdateClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = hypercompute_cluster.UpdateClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.update_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_delete_cluster_rest_bad_request( + request_type=hypercompute_cluster.DeleteClusterRequest, +): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
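+    # Session here is requests.Session; the REST transport's AuthorizedSession
+    # derives from it, so patching Session.request intercepts the HTTP call.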
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
+        response_value.status_code = 400
+        response_value.request = mock.Mock()
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.delete_cluster(request)
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        hypercompute_cluster.DeleteClusterRequest,
+        dict,
+    ],
+)
+def test_delete_cluster_rest_call_success(request_type):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"name": "projects/sample1/locations/sample2/clusters/sample3"}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), "request") as req:
+        # Designate an appropriate value for the returned response.
+        return_value = operations_pb2.Operation(name="operations/spam")
+
+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        response_value.status_code = 200
+        json_return_value = json_format.MessageToJson(return_value)
+        response_value.content = json_return_value.encode("UTF-8")
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        response = client.delete_cluster(request)
+
+    # Establish that the response is the type that we expect.
+    json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_cluster_rest_interceptors(null_interceptor):
+    transport = transports.HypercomputeClusterRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None
+        if null_interceptor
+        else transports.HypercomputeClusterRestInterceptor(),
+    )
+    client = HypercomputeClusterClient(transport=transport)
+
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        operation.Operation, "_set_result_from_operation"
+    ), mock.patch.object(
+        transports.HypercomputeClusterRestInterceptor, "post_delete_cluster"
+    ) as post, mock.patch.object(
+        transports.HypercomputeClusterRestInterceptor,
+        "post_delete_cluster_with_metadata",
+    ) as post_with_metadata, mock.patch.object(
+        transports.HypercomputeClusterRestInterceptor, "pre_delete_cluster"
+    ) as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        post_with_metadata.assert_not_called()
+        pb_message = hypercompute_cluster.DeleteClusterRequest.pb(
+            hypercompute_cluster.DeleteClusterRequest()
+        )
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = mock.Mock()
+        req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        return_value = json_format.MessageToJson(operations_pb2.Operation())
+        req.return_value.content = return_value
+
+        request = hypercompute_cluster.DeleteClusterRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+        post_with_metadata.return_value = operations_pb2.Operation(), metadata
+
+        client.delete_cluster(
+            request,
+            metadata=[
+                ("key", "val"),
+                ("cephalopod", "squid"),
+            ],
+        )
+
+        pre.assert_called_once()
+        post.assert_called_once()
+        post_with_metadata.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+    request = request_type()
+    request = json_format.ParseDict(
+        {"name": "projects/sample1/locations/sample2"}, request
+    )
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.get_location(request)
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        locations_pb2.GetLocationRequest,
+        dict,
+    ],
+)
+def test_get_location_rest(request_type):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    request_init = {"name": "projects/sample1/locations/sample2"}
+    request = request_type(**request_init)
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, "request") as req:
+        # Designate an appropriate value for the returned response.
+        return_value = locations_pb2.Location()
+
+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        response_value.status_code = 200
+        json_return_value = json_format.MessageToJson(return_value)
+        response_value.content = json_return_value.encode("UTF-8")
+
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+        response = client.get_location(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+    request_type=locations_pb2.ListLocationsRequest,
+):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+    request = request_type()
+    request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        locations_pb2.ListLocationsRequest,
+        dict,
+    ],
+)
+def test_list_locations_rest(request_type):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    request_init = {"name": "projects/sample1"}
+    request = request_type(**request_init)
+    # Mock the http request call within the method and fake a response.
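+    # An empty ListLocationsResponse is sufficient here; only the response
+    # type is asserted for this mixin method.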
+ with mock.patch.object(Session, "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_cancel_operation_rest_bad_request( + request_type=operations_pb2.CancelOperationRequest, +): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "{}" + response_value.content = json_return_value.encode("UTF-8") + + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_rest_bad_request( + request_type=operations_pb2.DeleteOperationRequest, +): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
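+    # delete_operation is provided by the google.longrunning operations mixin
+    # that the generated client exposes alongside its service methods.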
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "{}" + response_value.content = json_return_value.encode("UTF-8") + + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + request_type=operations_pb2.GetOperationRequest, +): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + request_type=operations_pb2.ListOperationsRequest, +): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_initialize_client_w_rest(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_clusters_empty_call_rest(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + client.list_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.ListClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_cluster_empty_call_rest(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + client.get_cluster(request=None) + + # Establish that the underlying stub method was called. 
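+        # With request=None the client builds a default GetClusterRequest,
+        # which is what the assertion below compares against.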
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.GetClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_cluster_empty_call_rest(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + client.create_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.CreateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_cluster_empty_call_rest(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + client.update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.UpdateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_cluster_empty_call_rest(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + client.delete_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = hypercompute_cluster.DeleteClusterRequest() + + assert args[0] == request_msg + + +def test_hypercompute_cluster_rest_lro_client(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have an api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.HypercomputeClusterGrpcTransport, + ) + + +def test_hypercompute_cluster_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.HypercomputeClusterTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_hypercompute_cluster_base_transport(): + # Instantiate the base transport. 
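+    # Its __init__ is patched out below so the abstract class can be
+    # constructed and every method checked for NotImplementedError.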
+ with mock.patch( + "google.cloud.hypercomputecluster_v1.services.hypercompute_cluster.transports.HypercomputeClusterTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.HypercomputeClusterTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "list_clusters", + "get_cluster", + "create_cluster", + "update_cluster", + "delete_cluster", + "get_location", + "list_locations", + "get_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_hypercompute_cluster_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.hypercomputecluster_v1.services.hypercompute_cluster.transports.HypercomputeClusterTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.HypercomputeClusterTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_hypercompute_cluster_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.hypercomputecluster_v1.services.hypercompute_cluster.transports.HypercomputeClusterTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.HypercomputeClusterTransport() + adc.assert_called_once() + + +def test_hypercompute_cluster_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + HypercomputeClusterClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.HypercomputeClusterGrpcTransport, + transports.HypercomputeClusterGrpcAsyncIOTransport, + ], +) +def test_hypercompute_cluster_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
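+    # google.auth.default() is the ADC entry point; outside of tests it
+    # resolves credentials from the environment, e.g.:
+    #     credentials, project = google.auth.default(
+    #         scopes=["https://www.googleapis.com/auth/cloud-platform"]
+    #     )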
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.HypercomputeClusterGrpcTransport, + transports.HypercomputeClusterGrpcAsyncIOTransport, + transports.HypercomputeClusterRestTransport, + ], +) +def test_hypercompute_cluster_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.HypercomputeClusterGrpcTransport, grpc_helpers), + (transports.HypercomputeClusterGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_hypercompute_cluster_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "hypercomputecluster.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="hypercomputecluster.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.HypercomputeClusterGrpcTransport, + transports.HypercomputeClusterGrpcAsyncIOTransport, + ], +) +def test_hypercompute_cluster_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
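+    # In that case grpc.ssl_channel_credentials() is expected to be built from
+    # the cert/key pair produced by the client_cert_source callback.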
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_hypercompute_cluster_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.HypercomputeClusterRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_hypercompute_cluster_host_no_port(transport_name): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="hypercomputecluster.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "hypercomputecluster.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://hypercomputecluster.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_hypercompute_cluster_host_with_port(transport_name): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="hypercomputecluster.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "hypercomputecluster.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://hypercomputecluster.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_hypercompute_cluster_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = HypercomputeClusterClient( + credentials=creds1, + transport=transport_name, + ) + client2 = HypercomputeClusterClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.list_clusters._session + session2 = client2.transport.list_clusters._session + assert session1 != session2 + session1 = client1.transport.get_cluster._session + session2 = client2.transport.get_cluster._session + assert session1 != session2 + session1 = client1.transport.create_cluster._session + session2 = client2.transport.create_cluster._session + assert session1 != session2 + session1 = client1.transport.update_cluster._session + session2 = client2.transport.update_cluster._session + assert session1 != session2 + session1 = client1.transport.delete_cluster._session + session2 = client2.transport.delete_cluster._session + assert session1 != session2 + + +def test_hypercompute_cluster_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
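+    # A pre-built channel takes precedence over channel creation; note that
+    # _ssl_channel_credentials stays None in the assertions below.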
+ transport = transports.HypercomputeClusterGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_hypercompute_cluster_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.HypercomputeClusterGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.filterwarnings("ignore::FutureWarning") +@pytest.mark.parametrize( + "transport_class", + [ + transports.HypercomputeClusterGrpcTransport, + transports.HypercomputeClusterGrpcAsyncIOTransport, + ], +) +def test_hypercompute_cluster_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.HypercomputeClusterGrpcTransport,
+        transports.HypercomputeClusterGrpcAsyncIOTransport,
+    ],
+)
+def test_hypercompute_cluster_transport_channel_mtls_with_adc(transport_class):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(
+            transport_class, "create_channel"
+        ) as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_hypercompute_cluster_grpc_lro_client():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="grpc",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_hypercompute_cluster_grpc_lro_async_client():
+    client = HypercomputeClusterAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="grpc_asyncio",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_bucket_path():
+    project = "squid"
+    bucket = "clam"
+    expected = "projects/{project}/buckets/{bucket}".format(
+        project=project,
+        bucket=bucket,
+    )
+    actual = HypercomputeClusterClient.bucket_path(project, bucket)
+    assert expected == actual
+
+
+def test_parse_bucket_path():
+    expected = {
+        "project": "whelk",
+        "bucket": "octopus",
+    }
+    path = HypercomputeClusterClient.bucket_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = HypercomputeClusterClient.parse_bucket_path(path)
+    assert expected == actual
+
+
+def test_cluster_path():
+    project = "oyster"
+    location = "nudibranch"
+    cluster = "cuttlefish"
+    expected = "projects/{project}/locations/{location}/clusters/{cluster}".format(
+        project=project,
+        location=location,
+        cluster=cluster,
+    )
+    actual = HypercomputeClusterClient.cluster_path(project, location, cluster)
+    assert expected == actual
+
+
+def test_parse_cluster_path():
+    expected = {
+        "project": "mussel",
+        "location": "winkle",
+        "cluster": "nautilus",
+    }
+    path = HypercomputeClusterClient.cluster_path(**expected)
+
+    # Check that the path construction is reversible.
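+    # e.g. cluster_path("p", "l", "c") returns
+    # "projects/p/locations/l/clusters/c", and parse_cluster_path() inverts it
+    # back into the keyword dict.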
+ actual = HypercomputeClusterClient.parse_cluster_path(path) + assert expected == actual + + +def test_compute_instance_path(): + project = "scallop" + zone = "abalone" + instance = "squid" + expected = "projects/{project}/zones/{zone}/instances/{instance}".format( + project=project, + zone=zone, + instance=instance, + ) + actual = HypercomputeClusterClient.compute_instance_path(project, zone, instance) + assert expected == actual + + +def test_parse_compute_instance_path(): + expected = { + "project": "clam", + "zone": "whelk", + "instance": "octopus", + } + path = HypercomputeClusterClient.compute_instance_path(**expected) + + # Check that the path construction is reversible. + actual = HypercomputeClusterClient.parse_compute_instance_path(path) + assert expected == actual + + +def test_disk_type_path(): + project = "oyster" + zone = "nudibranch" + disk_type = "cuttlefish" + expected = "projects/{project}/zones/{zone}/diskTypes/{disk_type}".format( + project=project, + zone=zone, + disk_type=disk_type, + ) + actual = HypercomputeClusterClient.disk_type_path(project, zone, disk_type) + assert expected == actual + + +def test_parse_disk_type_path(): + expected = { + "project": "mussel", + "zone": "winkle", + "disk_type": "nautilus", + } + path = HypercomputeClusterClient.disk_type_path(**expected) + + # Check that the path construction is reversible. + actual = HypercomputeClusterClient.parse_disk_type_path(path) + assert expected == actual + + +def test_file_instance_path(): + project = "scallop" + location = "abalone" + instance = "squid" + expected = "projects/{project}/locations/{location}/instances/{instance}".format( + project=project, + location=location, + instance=instance, + ) + actual = HypercomputeClusterClient.file_instance_path(project, location, instance) + assert expected == actual + + +def test_parse_file_instance_path(): + expected = { + "project": "clam", + "location": "whelk", + "instance": "octopus", + } + path = HypercomputeClusterClient.file_instance_path(**expected) + + # Check that the path construction is reversible. + actual = HypercomputeClusterClient.parse_file_instance_path(path) + assert expected == actual + + +def test_instance_path(): + project = "oyster" + location = "nudibranch" + instance = "cuttlefish" + expected = "projects/{project}/locations/{location}/instances/{instance}".format( + project=project, + location=location, + instance=instance, + ) + actual = HypercomputeClusterClient.instance_path(project, location, instance) + assert expected == actual + + +def test_parse_instance_path(): + expected = { + "project": "mussel", + "location": "winkle", + "instance": "nautilus", + } + path = HypercomputeClusterClient.instance_path(**expected) + + # Check that the path construction is reversible. + actual = HypercomputeClusterClient.parse_instance_path(path) + assert expected == actual + + +def test_network_path(): + project = "scallop" + network = "abalone" + expected = "projects/{project}/global/networks/{network}".format( + project=project, + network=network, + ) + actual = HypercomputeClusterClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "squid", + "network": "clam", + } + path = HypercomputeClusterClient.network_path(**expected) + + # Check that the path construction is reversible. 
+ actual = HypercomputeClusterClient.parse_network_path(path) + assert expected == actual + + +def test_reservation_path(): + project = "whelk" + zone = "octopus" + reservation = "oyster" + expected = "projects/{project}/zones/{zone}/reservations/{reservation}".format( + project=project, + zone=zone, + reservation=reservation, + ) + actual = HypercomputeClusterClient.reservation_path(project, zone, reservation) + assert expected == actual + + +def test_parse_reservation_path(): + expected = { + "project": "nudibranch", + "zone": "cuttlefish", + "reservation": "mussel", + } + path = HypercomputeClusterClient.reservation_path(**expected) + + # Check that the path construction is reversible. + actual = HypercomputeClusterClient.parse_reservation_path(path) + assert expected == actual + + +def test_subnetwork_path(): + project = "winkle" + region = "nautilus" + subnetwork = "scallop" + expected = "projects/{project}/regions/{region}/subnetworks/{subnetwork}".format( + project=project, + region=region, + subnetwork=subnetwork, + ) + actual = HypercomputeClusterClient.subnetwork_path(project, region, subnetwork) + assert expected == actual + + +def test_parse_subnetwork_path(): + expected = { + "project": "abalone", + "region": "squid", + "subnetwork": "clam", + } + path = HypercomputeClusterClient.subnetwork_path(**expected) + + # Check that the path construction is reversible. + actual = HypercomputeClusterClient.parse_subnetwork_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = HypercomputeClusterClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = HypercomputeClusterClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = HypercomputeClusterClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = HypercomputeClusterClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = HypercomputeClusterClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = HypercomputeClusterClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = HypercomputeClusterClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = HypercomputeClusterClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = HypercomputeClusterClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format( + project=project, + ) + actual = HypercomputeClusterClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = HypercomputeClusterClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = HypercomputeClusterClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = HypercomputeClusterClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = HypercomputeClusterClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = HypercomputeClusterClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.HypercomputeClusterTransport, "_prep_wrapped_messages" + ) as prep: + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.HypercomputeClusterTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = HypercomputeClusterClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_delete_operation(transport: str = "grpc"): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
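+        # FakeUnaryUnaryCall wraps the value in an awaitable, mimicking a real
+        # async gRPC call.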
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_field_headers(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = HypercomputeClusterClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = HypercomputeClusterAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+
+
+def test_cancel_operation_field_headers():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.CancelOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        call.return_value = None
+
+        client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.CancelOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations",
+    ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+
+        response = client.cancel_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        response = await client.cancel_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
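+
+
+# Illustrative sketch, not generated code: the *_from_dict tests above rely
+# on the GAPIC convention that a plain dict passed as `request` is coerced
+# into the corresponding request message. A minimal demonstration of that
+# equivalence (`_dict_coercion_demo` is a hypothetical helper for this
+# sketch):
+def _dict_coercion_demo():
+    explicit = operations_pb2.CancelOperationRequest(name="locations")
+    coerced = operations_pb2.CancelOperationRequest(**{"name": "locations"})
+    # Both constructions yield the same message the client builds internally
+    # when it receives request={"name": "locations"}.
+    assert explicit == coerced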
+
+
+def test_get_operation(transport: str = "grpc"):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.GetOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.GetOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        await client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations",
+    ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+
+        response = client.get_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
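+
+
+# Illustrative sketch, not generated code: every test in this file patches
+# __call__ on type(client.transport.<rpc>) rather than replacing the
+# attribute itself. The transport exposes each RPC as a gRPC multicallable
+# object, and Python looks up special methods such as __call__ on the type,
+# so patching the type is what actually intercepts the invocation. A minimal
+# reproduction, assuming only the module-level `mock` import used above
+# (`_Stub` and `_demo_patch_on_type` are hypothetical names for this sketch):
+class _Stub:
+    def __call__(self, request):
+        raise RuntimeError("would hit the network")
+
+
+def _demo_patch_on_type():
+    stub = _Stub()
+    with mock.patch.object(type(stub), "__call__") as call:
+        call.return_value = "canned response"
+        # The type-level patch intercepts the implicit __call__ lookup.
+        assert stub("ignored request") == "canned response"
+        call.assert_called_once()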
+
+
+def test_list_operations_field_headers():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.ListOperationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        call.return_value = operations_pb2.ListOperationsResponse()
+
+        client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.ListOperationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations",
+    ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+
+        response = client.list_operations(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.ListLocationsResponse()
+        response = client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations",
+    ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        response = client.list_locations(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.GetLocationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+        response = client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.GetLocationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = locations_pb2.Location()
+
+        client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = HypercomputeClusterAsyncClient(credentials=async_anonymous_credentials())
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
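+
+
+# Illustrative sketch, not generated code: the transport-close tests below
+# exercise the context-manager protocol that application code can use to
+# release the underlying channel deterministically (`_demo_client_context`
+# is a hypothetical helper for this sketch):
+def _demo_client_context():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+    )
+    with mock.patch.object(type(client.transport), "close") as close:
+        # __enter__ returns the client itself; __exit__ closes the transport.
+        with client as same_client:
+            assert same_client is client
+        close.assert_called_once()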
+
+
+def test_transport_close_grpc():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+    )
+    with mock.patch.object(
+        type(getattr(client.transport, "_grpc_channel")), "close"
+    ) as close:
+        with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+    client = HypercomputeClusterAsyncClient(
+        credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+    )
+    with mock.patch.object(
+        type(getattr(client.transport, "_grpc_channel")), "close"
+    ) as close:
+        async with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
+def test_transport_close_rest():
+    client = HypercomputeClusterClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+    )
+    with mock.patch.object(
+        type(getattr(client.transport, "_session")), "close"
+    ) as close:
+        with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
+def test_client_ctx():
+    transports = [
+        "rest",
+        "grpc",
+    ]
+    for transport in transports:
+        client = HypercomputeClusterClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        # Test client calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
+
+
+@pytest.mark.parametrize(
+    "client_class,transport_class",
+    [
+        (HypercomputeClusterClient, transports.HypercomputeClusterGrpcTransport),
+        (
+            HypercomputeClusterAsyncClient,
+            transports.HypercomputeClusterGrpcAsyncIOTransport,
+        ),
+    ],
+)
+def test_api_key_credentials(client_class, transport_class):
+    with mock.patch.object(
+        google.auth._default, "get_api_key_credentials", create=True
+    ) as get_api_key_credentials:
+        mock_cred = mock.Mock()
+        get_api_key_credentials.return_value = mock_cred
+        options = client_options.ClientOptions()
+        options.api_key = "api_key"
+        with mock.patch.object(transport_class, "__init__") as patched:
+            patched.return_value = None
+            client = client_class(client_options=options)
+            patched.assert_called_once_with(
+                credentials=mock_cred,
+                credentials_file=None,
+                host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+                    UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+                ),
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+                api_audience=None,
+            )
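+
+
+# Illustrative sketch, not generated code: test_api_key_credentials above
+# verifies that an API key supplied through ClientOptions is exchanged for
+# credentials before the transport is constructed. The corresponding
+# application-side usage, with a placeholder key value:
+def _demo_api_key_client():
+    options = client_options.ClientOptions(api_key="YOUR_API_KEY")
+    return HypercomputeClusterClient(client_options=options)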